diff --git a/packages/contracts/contracts/rewards/IRewardsIssuer.sol b/packages/contracts/contracts/rewards/IRewardsIssuer.sol index 9da18b92b..2a18b6e3c 100644 --- a/packages/contracts/contracts/rewards/IRewardsIssuer.sol +++ b/packages/contracts/contracts/rewards/IRewardsIssuer.sol @@ -5,7 +5,9 @@ pragma solidity ^0.7.6 || 0.8.27; interface IRewardsIssuer { /** * @dev Get allocation data to calculate rewards issuance + * * @param allocationId The allocation Id + * @return isActive Whether the allocation is active or not * @return indexer The indexer address * @return subgraphDeploymentId Subgraph deployment id for the allocation * @return tokens Amount of allocated tokens @@ -18,6 +20,7 @@ interface IRewardsIssuer { external view returns ( + bool isActive, address indexer, bytes32 subgraphDeploymentId, uint256 tokens, diff --git a/packages/contracts/contracts/rewards/RewardsManager.sol b/packages/contracts/contracts/rewards/RewardsManager.sol index 1517775f9..54dbead81 100644 --- a/packages/contracts/contracts/rewards/RewardsManager.sol +++ b/packages/contracts/contracts/rewards/RewardsManager.sol @@ -316,6 +316,7 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa /** * @dev Calculate current rewards for a given allocation on demand. * The allocation could be a legacy allocation or a new subgraph service allocation. + * Returns 0 if the allocation is not active. * @param _allocationID Allocation * @return Rewards amount for an allocation */ @@ -326,6 +327,7 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa ); ( + bool isActive, , bytes32 subgraphDeploymentId, uint256 tokens, @@ -333,6 +335,10 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa uint256 accRewardsPending ) = IRewardsIssuer(_rewardsIssuer).getAllocationData(_allocationID); + if (!isActive) { + return 0; + } + (uint256 accRewardsPerAllocatedToken, ) = getAccRewardsPerAllocatedToken(subgraphDeploymentId); return accRewardsPending.add(_calcRewards(tokens, alloAccRewardsPerAllocatedToken, accRewardsPerAllocatedToken)); @@ -372,6 +378,7 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa * This function can only be called by an authorized rewards issuer which are * the staking contract (for legacy allocations), and the subgraph service (for new allocations). * This function will mint the necessary tokens to reward based on the inflation calculation. + * Mints 0 tokens if the allocation is not active. 
* @param _allocationID Allocation * @return Assigned rewards amount */ @@ -383,6 +390,7 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa ); ( + bool isActive, address indexer, bytes32 subgraphDeploymentID, uint256 tokens, @@ -398,15 +406,18 @@ contract RewardsManager is RewardsManagerV5Storage, GraphUpgradeable, IRewardsMa return 0; } - // Calculate rewards accrued by this allocation - uint256 rewards = accRewardsPending.add( - _calcRewards(tokens, accRewardsPerAllocatedToken, updatedAccRewardsPerAllocatedToken) - ); - if (rewards > 0) { - // Mint directly to rewards issuer for the reward amount - // The rewards issuer contract will do bookkeeping of the reward and - // assign in proportion to each stakeholder incentive - graphToken().mint(rewardsIssuer, rewards); + uint256 rewards = 0; + if (isActive) { + // Calculate rewards accrued by this allocation + rewards = accRewardsPending.add( + _calcRewards(tokens, accRewardsPerAllocatedToken, updatedAccRewardsPerAllocatedToken) + ); + if (rewards > 0) { + // Mint directly to rewards issuer for the reward amount + // The rewards issuer contract will do bookkeeping of the reward and + // assign in proportion to each stakeholder incentive + graphToken().mint(rewardsIssuer, rewards); + } } emit HorizonRewardsAssigned(indexer, _allocationID, rewards); diff --git a/packages/contracts/contracts/staking/IStakingBase.sol b/packages/contracts/contracts/staking/IStakingBase.sol index f01ca4326..18c421ef1 100644 --- a/packages/contracts/contracts/staking/IStakingBase.sol +++ b/packages/contracts/contracts/staking/IStakingBase.sol @@ -368,7 +368,7 @@ interface IStakingBase is IStakingData { */ function getAllocationData( address _allocationID - ) external view returns (address, bytes32, uint256, uint256, uint256); + ) external view returns (bool, address, bytes32, uint256, uint256, uint256); /** * @dev New function to get the allocation active status for the rewards manager diff --git a/packages/contracts/contracts/staking/Staking.sol b/packages/contracts/contracts/staking/Staking.sol index eaafdee0c..f4c09d79f 100644 --- a/packages/contracts/contracts/staking/Staking.sol +++ b/packages/contracts/contracts/staking/Staking.sol @@ -479,9 +479,18 @@ abstract contract Staking is StakingV4Storage, GraphUpgradeable, IStakingBase, M */ function getAllocationData( address _allocationID - ) external view override returns (address, bytes32, uint256, uint256, uint256) { + ) external view override returns (bool, address, bytes32, uint256, uint256, uint256) { Allocation memory alloc = __allocations[_allocationID]; - return (alloc.indexer, alloc.subgraphDeploymentID, alloc.tokens, alloc.accRewardsPerAllocatedToken, 0); + bool isActive = _getAllocationState(_allocationID) == AllocationState.Active; + + return ( + isActive, + alloc.indexer, + alloc.subgraphDeploymentID, + alloc.tokens, + alloc.accRewardsPerAllocatedToken, + 0 + ); } /** @@ -810,9 +819,6 @@ abstract contract Staking is StakingV4Storage, GraphUpgradeable, IStakingBase, M require(isIndexerOrOperator, "!auth"); } - // Close the allocation - __allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; - // -- Rewards Distribution -- // Process non-zero-allocation rewards tracking @@ -836,6 +842,11 @@ abstract contract Staking is StakingV4Storage, GraphUpgradeable, IStakingBase, M ); } + // Close the allocation + // Note that this breaks CEI pattern. We update after the rewards distribution logic as it expects the allocation + // to still be active. 
There shouldn't be reentrancy risk here as all internal calls are to trusted contracts. + __allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; + emit AllocationClosed( alloc.indexer, alloc.subgraphDeploymentID, diff --git a/packages/contracts/scripts/build b/packages/contracts/scripts/build index 06b9823c1..b034b0892 100755 --- a/packages/contracts/scripts/build +++ b/packages/contracts/scripts/build @@ -10,8 +10,8 @@ yarn compile tsc # Copy types and abis to distribution folder -cp -R build/types/* dist/build/types -cp -R build/abis/ dist/abis +cp -R build/types/* dist/contracts/build/types +cp -R build/abis/ dist/contracts/build/abis # Move compiled types ts -mv dist/build/types dist/types \ No newline at end of file +mv dist/contracts/build/types dist/types \ No newline at end of file diff --git a/packages/contracts/test/unit/rewards/rewards.test.ts b/packages/contracts/test/unit/rewards/rewards.test.ts index fe5ac75fb..50d598e6e 100644 --- a/packages/contracts/test/unit/rewards/rewards.test.ts +++ b/packages/contracts/test/unit/rewards/rewards.test.ts @@ -21,6 +21,7 @@ import { toGRT, } from '@graphprotocol/sdk' import type { SignerWithAddress } from '@nomiclabs/hardhat-ethers/signers' +import { impersonateAccount, setBalance } from '../../../../sdk/src/helpers' const MAX_PPM = 1000000 @@ -838,6 +839,30 @@ describe('Rewards', () => { .emit(rewardsManager, 'RewardsDenied') .withArgs(indexer1.address, allocationID1) }) + + it('should not distribute rewards if allocation is not active', async function () { + // Setup + await setupIndexerAllocation() + + // Jump and close allocation + await helpers.mineEpoch(epochManager) + await staking.connect(indexer1).closeAllocation(allocationID1, randomHexBytes()) + + // Jump some more + await helpers.mineEpoch(epochManager, 10) + + // Impersonate staking contract + const impersonatedStaking = await impersonateAccount(staking.address) + await setBalance(staking.address, toGRT('1000000')) + + // Distribute rewards + const tx = await rewardsManager.connect(impersonatedStaking).takeRewards(allocationID1) + const receipt = await tx.wait() + const event = rewardsManager.interface.parseLog(receipt.logs[0]).args + expect(event.indexer).eq(indexer1.address) + expect(event.allocationID).eq(allocationID1) + expect(toRound(event.amount)).eq(toRound(BigNumber.from(0))) + }) }) }) diff --git a/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol b/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol index 875752df1..a1c38a99a 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol @@ -63,6 +63,9 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat /** * @notice Releases expired stake claims for a service provider. * @dev This function can be overriden and/or disabled. + * @dev Note that the list is traversed by creation date not by releasableAt date. Traversing will stop + * when the first stake claim that is not yet expired is found even if later stake claims have expired. This + * could happen if stake claims are generated with different unlock periods. * @dev Emits a {StakeClaimsReleased} event, and a {StakeClaimReleased} event for each claim released. * @param _serviceProvider The address of the service provider * @param _numClaimsToRelease Amount of stake claims to process. If 0, all stake claims are processed. 
@@ -85,7 +88,7 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat * @dev This function is used as a callback in the stake claims linked list traversal. * @param _claimId The id of the stake claim * @param _acc The accumulator for the stake claims being processed - * @return Wether the stake claim is still locked, indicating that the traversal should continue or stop. + * @return Whether the stake claim is still locked, indicating that the traversal should continue or stop. * @return The updated accumulator data */ function _processStakeClaim(bytes32 _claimId, bytes memory _acc) private returns (bool, bytes memory) { diff --git a/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol b/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol index 03d9d55e5..30d1aa4ee 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol @@ -11,6 +11,7 @@ import { LinkedList } from "../../libraries/LinkedList.sol"; * bugs. We may have an active bug bounty program. */ abstract contract DataServiceFeesV1Storage { + /// @notice The amount of tokens locked in stake claims for each service provider mapping(address serviceProvider => uint256 tokens) public feesProvisionTracker; /// @notice List of all locked stake claims to be released to service providers diff --git a/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol b/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol index 3a42bc8f4..16cd26885 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol @@ -32,12 +32,12 @@ abstract contract DataServicePausable is Pausable, DataService, IDataServicePaus } /// @inheritdoc IDataServicePausable - function pause() external override onlyPauseGuardian whenNotPaused { + function pause() external override onlyPauseGuardian { _pause(); } /// @inheritdoc IDataServicePausable - function unpause() external override onlyPauseGuardian whenPaused { + function unpause() external override onlyPauseGuardian { _unpause(); } diff --git a/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol b/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol index 6946a0091..da4d8fd77 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol @@ -31,12 +31,12 @@ abstract contract DataServicePausableUpgradeable is PausableUpgradeable, DataSer } /// @inheritdoc IDataServicePausable - function pause() external override onlyPauseGuardian whenNotPaused { + function pause() external override onlyPauseGuardian { _pause(); } /// @inheritdoc IDataServicePausable - function unpause() external override onlyPauseGuardian whenPaused { + function unpause() external override onlyPauseGuardian { _unpause(); } diff --git a/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol b/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol index 3ead88a09..699394c8d 100644 --- a/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol +++ b/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol @@ -19,6 +19,10 @@ import { ProvisionManagerV1Storage } from 
"./ProvisionManagerStorage.sol"; * The parameters are: * - Provision parameters (thawing period and verifier cut) * - Provision tokens + * + * Note that default values for all provision parameters provide the most permissive configuration, it's + * highly recommended to override them at the data service level. + * * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. */ @@ -43,6 +47,9 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa /// @notice The default maximum provision tokens. uint256 internal constant DEFAULT_MAX_PROVISION_TOKENS = type(uint256).max; + /// @notice The default delegation ratio. + uint32 internal constant DEFAULT_DELEGATION_RATIO = type(uint32).max; + /** * @notice Emitted when the provision tokens range is set. * @param min The minimum allowed value for the provision tokens. @@ -138,6 +145,7 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa _setProvisionTokensRange(DEFAULT_MIN_PROVISION_TOKENS, DEFAULT_MAX_PROVISION_TOKENS); _setVerifierCutRange(DEFAULT_MIN_VERIFIER_CUT, DEFAULT_MAX_VERIFIER_CUT); _setThawingPeriodRange(DEFAULT_MIN_THAWING_PERIOD, DEFAULT_MAX_THAWING_PERIOD); + _setDelegationRatio(DEFAULT_DELEGATION_RATIO); } /** @@ -145,8 +153,6 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * the {HorizonStaking} contract. * @dev Checks the pending provision parameters, not the current ones. * - * Emits a {ProvisionPendingParametersAccepted} event. - * * @param _serviceProvider The address of the service provider. */ function _acceptProvisionParameters(address _serviceProvider) internal { diff --git a/packages/horizon/contracts/interfaces/IAuthorizable.sol b/packages/horizon/contracts/interfaces/IAuthorizable.sol index 1ba46341c..cd6553fa7 100644 --- a/packages/horizon/contracts/interfaces/IAuthorizable.sol +++ b/packages/horizon/contracts/interfaces/IAuthorizable.sol @@ -100,7 +100,7 @@ interface IAuthorizable { * - `proof` must be a valid signature from the signer being authorized * * Emits a {SignerAuthorized} event - * @param signer The addres of the signer + * @param signer The address of the signer * @param proofDeadline The deadline for the proof provided by the signer * @param proof The proof provided by the signer to be authorized by the authorizer * consists of (chain id, verifying contract address, domain, proof deadline, authorizer address) @@ -141,7 +141,8 @@ interface IAuthorizable { function revokeAuthorizedSigner(address signer) external; /** - * @notice Returns the timestamp at which the thawing period ends for a signer + * @notice Returns the timestamp at which the thawing period ends for a signer. + * Returns 0 if the signer is not thawing. * @param signer The address of the signer * @return The timestamp at which the thawing period ends */ diff --git a/packages/horizon/contracts/interfaces/internal/IHorizonStakingBase.sol b/packages/horizon/contracts/interfaces/internal/IHorizonStakingBase.sol index 280facaf7..fb642fe84 100644 --- a/packages/horizon/contracts/interfaces/internal/IHorizonStakingBase.sol +++ b/packages/horizon/contracts/interfaces/internal/IHorizonStakingBase.sol @@ -169,7 +169,11 @@ interface IHorizonStakingBase { ) external view returns (LinkedList.List memory); /** - * @notice Gets the amount of thawed tokens for a given provision. + * @notice Gets the amount of thawed tokens that can be releasedfor a given provision. 
+ * @dev Note that this function does not return the total amount of thawed tokens, only those that can
+ * be released. If thaw requests are created with different thawing periods, it's
+ * possible that an unexpired thaw request temporarily blocks the release of other ones that have already
+ * expired. This function will stop counting when it encounters the first thaw request that is not yet expired. * @param thawRequestType The type of thaw request. * @param serviceProvider The address of the service provider. * @param verifier The address of the verifier. diff --git a/packages/horizon/contracts/interfaces/internal/IHorizonStakingExtension.sol b/packages/horizon/contracts/interfaces/internal/IHorizonStakingExtension.sol index 77ce2cc03..de39ab52c 100644 --- a/packages/horizon/contracts/interfaces/internal/IHorizonStakingExtension.sol +++ b/packages/horizon/contracts/interfaces/internal/IHorizonStakingExtension.sol @@ -140,6 +140,10 @@ interface IHorizonStakingExtension is IRewardsIssuer { /** * @notice Slash the indexer stake. Delegated tokens are not subject to slashing. + * Note that depending on the state of the indexer's stake, the slashed amount might be smaller than the + * requested slash amount. This can happen if the indexer has moved a significant part of their stake to + * a provision. Any outstanding slashing amount should be settled using Horizon's slash function + * {IHorizonStaking.slash}. * @dev Can only be called by the slasher role. * @param indexer Address of indexer to slash * @param tokens Amount of tokens to slash from the indexer stake diff --git a/packages/horizon/contracts/interfaces/internal/IHorizonStakingMain.sol b/packages/horizon/contracts/interfaces/internal/IHorizonStakingMain.sol index 4fc677b83..71cf7b3b4 100644 --- a/packages/horizon/contracts/interfaces/internal/IHorizonStakingMain.sol +++ b/packages/horizon/contracts/interfaces/internal/IHorizonStakingMain.sol @@ -24,7 +24,7 @@ interface IHorizonStakingMain { /** * @notice Emitted when a service provider unstakes tokens during the transition period. 
* @param serviceProvider The address of the service provider - * @param tokens The amount of tokens unstaked + * @param tokens The amount of tokens now locked (including previously locked tokens) * @param until The block number until the stake is locked */ event HorizonStakeLocked(address indexed serviceProvider, uint256 tokens, uint256 until); @@ -189,12 +189,14 @@ interface IHorizonStakingMain { * @param verifier The address of the verifier * @param delegator The address of the delegator * @param tokens The amount of tokens undelegated + * @param shares The amount of shares undelegated */ event TokensUndelegated( address indexed serviceProvider, address indexed verifier, address indexed delegator, - uint256 tokens + uint256 tokens, + uint256 shares ); /** @@ -254,6 +256,7 @@ interface IHorizonStakingMain { * @param shares The amount of shares being thawed * @param thawingUntil The timestamp until the stake is thawed * @param thawRequestId The ID of the thaw request + * @param nonce The nonce of the thaw request */ event ThawRequestCreated( IHorizonStakingTypes.ThawRequestType indexed requestType, @@ -262,7 +265,8 @@ interface IHorizonStakingMain { address owner, uint256 shares, uint64 thawingUntil, - bytes32 thawRequestId + bytes32 thawRequestId, + uint256 nonce ); /** @@ -508,6 +512,11 @@ interface IHorizonStakingMain { */ error HorizonStakingLegacySlashFailed(); + /** + * @notice Thrown when attempting to slash a provision with no tokens to slash. + */ + error HorizonStakingNoTokensToSlash(); + // -- Functions -- /** @@ -539,10 +548,10 @@ interface IHorizonStakingMain { */ function stakeTo(address serviceProvider, uint256 tokens) external; - // can be called by anyone if the service provider has provisioned stake to this verifier /** * @notice Deposit tokens on the service provider stake, on behalf of the service provider, * provisioned to a specific verifier. + * @dev This function can be called by the service provider, by an authorized operator or by the verifier itself. * @dev Requirements: * - The `serviceProvider` must have previously provisioned stake to `verifier`. * - `_tokens` cannot be zero. @@ -657,7 +666,8 @@ interface IHorizonStakingMain { /** * @notice Remove tokens from a provision and move them back to the service provider's idle stake. * @dev The parameter `nThawRequests` can be set to a non zero value to fulfill a specific number of thaw - * requests in the event that fulfilling all of them results in a gas limit error. + * requests in the event that fulfilling all of them results in a gas limit error. Otherwise, the function + * will attempt to fulfill all thaw requests until the first one that is not yet expired is found. * * Requirements: * - Must have previously initiated a thaw request using {thaw}. @@ -697,10 +707,15 @@ interface IHorizonStakingMain { /** * @notice Stages a provision parameter update. Note that the change is not effective until the verifier calls - * {acceptProvisionParameters}. + * {acceptProvisionParameters}. Calling this function is a no-op if the new parameters are the same as the current + * ones. * @dev This two step update process prevents the service provider from changing the parameters * without the verifier's consent. * + * Requirements: + * - `thawingPeriod` must be less than or equal to `_maxThawingPeriod`. Note that if `_maxThawingPeriod` changes, the + * function will not revert if called with the same thawing period as the current one. + * * Emits a {ProvisionParametersStaged} event if at least one of the parameters changed. 
* * @param serviceProvider The service provider address @@ -781,7 +796,8 @@ interface IHorizonStakingMain { /** * @notice Withdraw undelegated tokens from a provision after thawing. * @dev The parameter `nThawRequests` can be set to a non zero value to fulfill a specific number of thaw - * requests in the event that fulfilling all of them results in a gas limit error. + * requests in the event that fulfilling all of them results in a gas limit error. Otherwise, the function + * will attempt to fulfill all thaw requests until the first one that is not yet expired is found. * @dev If the delegation pool was completely slashed before withdrawing, calling this function will fulfill * the thaw requests with an amount equal to zero. * diff --git a/packages/horizon/contracts/interfaces/internal/IHorizonStakingTypes.sol b/packages/horizon/contracts/interfaces/internal/IHorizonStakingTypes.sol index 6c3420cc7..0ab84fc1b 100644 --- a/packages/horizon/contracts/interfaces/internal/IHorizonStakingTypes.sol +++ b/packages/horizon/contracts/interfaces/internal/IHorizonStakingTypes.sol @@ -22,6 +22,8 @@ interface IHorizonStakingTypes { * @param createdAt Timestamp when the provision was created * @param maxVerifierCutPending Pending value for `maxVerifierCut`. Verifier needs to accept it before it becomes active. * @param thawingPeriodPending Pending value for `thawingPeriod`. Verifier needs to accept it before it becomes active. + * @param lastParametersStagedAt Timestamp when the provision parameters were last staged. Can be used by data service implementation to + * implement arbitrary parameter update logic. * @param thawingNonce Value of the current thawing nonce. Thaw requests with older nonces are invalid. */ struct Provision { @@ -33,6 +35,7 @@ interface IHorizonStakingTypes { uint64 createdAt; uint32 maxVerifierCutPending; uint64 thawingPeriodPending; + uint256 lastParametersStagedAt; uint256 thawingNonce; } @@ -148,13 +151,13 @@ interface IHorizonStakingTypes { * ordered by creation timestamp. * @param shares Shares that represent the tokens being thawed * @param thawingUntil The timestamp when the thawed funds can be removed from the provision - * @param next Id of the next thaw request in the linked list + * @param nextRequest Id of the next thaw request in the linked list * @param thawingNonce Used to invalidate unfulfilled thaw requests */ struct ThawRequest { uint256 shares; uint64 thawingUntil; - bytes32 next; + bytes32 nextRequest; uint256 thawingNonce; } diff --git a/packages/horizon/contracts/libraries/MathUtils.sol b/packages/horizon/contracts/libraries/MathUtils.sol index 7b023e556..fc81e9608 100644 --- a/packages/horizon/contracts/libraries/MathUtils.sol +++ b/packages/horizon/contracts/libraries/MathUtils.sol @@ -13,7 +13,7 @@ library MathUtils { * @dev Calculates the weighted average of two values pondering each of these * values based on configured weights. The contribution of each value N is * weightN/(weightA + weightB). The calculation rounds up to ensure the result - * is always greater than the smallest of the two values. + * is always equal or greater than the smallest of the two values. 
* @param valueA The amount for value A * @param weightA The weight to use for value A * @param valueB The amount for value B diff --git a/packages/horizon/contracts/libraries/UintRange.sol b/packages/horizon/contracts/libraries/UintRange.sol index 54552b549..69d3f5d8a 100644 --- a/packages/horizon/contracts/libraries/UintRange.sol +++ b/packages/horizon/contracts/libraries/UintRange.sol @@ -8,8 +8,6 @@ pragma solidity 0.8.27; * bugs. We may have an active bug bounty program. */ library UintRange { - using UintRange for uint256; - /** * @notice Checks if a value is in the range [`min`, `max`]. * @param value The value to check. diff --git a/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol b/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol index 73f887527..c3009f41a 100644 --- a/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol +++ b/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol @@ -28,7 +28,7 @@ contract GraphTallyCollector is EIP712, GraphDirectory, Authorizable, IGraphTall /// @notice The EIP712 typehash for the ReceiptAggregateVoucher struct bytes32 private constant EIP712_RAV_TYPEHASH = keccak256( - "ReceiptAggregateVoucher(address payer,address serviceProvider,address dataService,uint64 timestampNs,uint128 valueAggregate,bytes metadata)" + "ReceiptAggregateVoucher(bytes32 collectionId,address payer,address serviceProvider,address dataService,uint64 timestampNs,uint128 valueAggregate,bytes metadata)" ); /// @notice Tracks the amount of tokens already collected by a data service from a payer to a receiver. @@ -88,7 +88,7 @@ contract GraphTallyCollector is EIP712, GraphDirectory, Authorizable, IGraphTall * This variant adds the ability to partially collect a RAV by specifying the amount of tokens to collect. * @param _paymentType The payment type to collect * @param _data Additional data required for the payment collection - * @param _tokensToCollect The amount of tokens to collect + * @param _tokensToCollect The amount of tokens to collect. If 0, all tokens from the RAV will be collected. * @return The amount of tokens collected */ function _collect( @@ -189,6 +189,7 @@ contract GraphTallyCollector is EIP712, GraphDirectory, Authorizable, IGraphTall keccak256( abi.encode( EIP712_RAV_TYPEHASH, + _rav.collectionId, _rav.payer, _rav.serviceProvider, _rav.dataService, diff --git a/packages/horizon/contracts/staking/HorizonStaking.sol b/packages/horizon/contracts/staking/HorizonStaking.sol index c495a7914..49d753ffb 100644 --- a/packages/horizon/contracts/staking/HorizonStaking.sol +++ b/packages/horizon/contracts/staking/HorizonStaking.sol @@ -32,9 +32,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { using PPMMath for uint256; using LinkedList for LinkedList.List; - /// @dev Fixed point precision - uint256 private constant FIXED_POINT_PRECISION = 1e18; - /// @dev Maximum number of simultaneous stake thaw requests (per provision) or undelegations (per delegation) uint256 private constant MAX_THAW_REQUESTS = 1_000; @@ -57,6 +54,19 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { _; } + /** + * @notice Checks that the caller is authorized to operate over a provision or it is the verifier. + * @param serviceProvider The address of the service provider. + * @param verifier The address of the verifier. 
+ */ + modifier onlyAuthorizedOrVerifier(address serviceProvider, address verifier) { + require( + _isAuthorized(serviceProvider, verifier, msg.sender) || msg.sender == verifier, + HorizonStakingNotAuthorized(serviceProvider, verifier, msg.sender) + ); + _; + } + /** * @dev The staking contract is upgradeable however we still use the constructor to set * a few immutable variables. @@ -121,7 +131,11 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { } /// @inheritdoc IHorizonStakingMain - function stakeToProvision(address serviceProvider, address verifier, uint256 tokens) external override notPaused { + function stakeToProvision( + address serviceProvider, + address verifier, + uint256 tokens + ) external override notPaused onlyAuthorizedOrVerifier(serviceProvider, verifier) { _stakeTo(serviceProvider, tokens); _addToProvision(serviceProvider, verifier, tokens); } @@ -202,19 +216,27 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { uint32 newMaxVerifierCut, uint64 newThawingPeriod ) external override notPaused onlyAuthorized(serviceProvider, verifier) { - require(PPMMath.isValidPPM(newMaxVerifierCut), HorizonStakingInvalidMaxVerifierCut(newMaxVerifierCut)); - require( - newThawingPeriod <= _maxThawingPeriod, - HorizonStakingInvalidThawingPeriod(newThawingPeriod, _maxThawingPeriod) - ); - // Provision must exist Provision storage prov = _provisions[serviceProvider][verifier]; require(prov.createdAt != 0, HorizonStakingInvalidProvision(serviceProvider, verifier)); - if ((prov.maxVerifierCutPending != newMaxVerifierCut) || (prov.thawingPeriodPending != newThawingPeriod)) { - prov.maxVerifierCutPending = newMaxVerifierCut; - prov.thawingPeriodPending = newThawingPeriod; + bool verifierCutChanged = prov.maxVerifierCutPending != newMaxVerifierCut; + bool thawingPeriodChanged = prov.thawingPeriodPending != newThawingPeriod; + + if (verifierCutChanged || thawingPeriodChanged) { + if (verifierCutChanged) { + require(PPMMath.isValidPPM(newMaxVerifierCut), HorizonStakingInvalidMaxVerifierCut(newMaxVerifierCut)); + prov.maxVerifierCutPending = newMaxVerifierCut; + } + if (thawingPeriodChanged) { + require( + newThawingPeriod <= _maxThawingPeriod, + HorizonStakingInvalidThawingPeriod(newThawingPeriod, _maxThawingPeriod) + ); + prov.thawingPeriodPending = newThawingPeriod; + } + + prov.lastParametersStagedAt = block.timestamp; emit ProvisionParametersStaged(serviceProvider, verifier, newMaxVerifierCut, newThawingPeriod); } } @@ -277,7 +299,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { address verifier, uint256 shares ) external override notPaused returns (bytes32) { - return _undelegate(ThawRequestType.Delegation, serviceProvider, verifier, shares, msg.sender); + return _undelegate(serviceProvider, verifier, shares); } /// @inheritdoc IHorizonStakingMain @@ -286,15 +308,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { address verifier, uint256 nThawRequests ) external override notPaused { - _withdrawDelegated( - ThawRequestType.Delegation, - serviceProvider, - verifier, - address(0), - address(0), - 0, - nThawRequests - ); + _withdrawDelegated(serviceProvider, verifier, address(0), address(0), 0, nThawRequests); } /// @inheritdoc IHorizonStakingMain @@ -309,7 +323,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { require(newServiceProvider != address(0), HorizonStakingInvalidServiceProviderZeroAddress()); require(newVerifier != address(0), HorizonStakingInvalidVerifierZeroAddress()); 
_withdrawDelegated( - ThawRequestType.Delegation, oldServiceProvider, oldVerifier, newServiceProvider, @@ -340,7 +353,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { /// @inheritdoc IHorizonStakingMain function undelegate(address serviceProvider, uint256 shares) external override notPaused { - _undelegate(ThawRequestType.Delegation, serviceProvider, SUBGRAPH_DATA_SERVICE_ADDRESS, shares, msg.sender); + _undelegate(serviceProvider, SUBGRAPH_DATA_SERVICE_ADDRESS, shares); } /// @inheritdoc IHorizonStakingMain @@ -394,12 +407,9 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { // Forward call to staking extension // solhint-disable-next-line avoid-low-level-calls (bool success, ) = STAKING_EXTENSION_ADDRESS.delegatecall( - abi.encodeWithSelector( - IHorizonStakingExtension.legacySlash.selector, - serviceProvider, - tokens, - tokensVerifier, - verifierDestination + abi.encodeCall( + IHorizonStakingExtension.legacySlash, + (serviceProvider, tokens, tokensVerifier, verifierDestination) ) ); require(success, HorizonStakingLegacySlashFailed()); @@ -410,7 +420,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { Provision storage prov = _provisions[serviceProvider][verifier]; DelegationPoolInternal storage pool = _getDelegationPool(serviceProvider, verifier); uint256 tokensProvisionTotal = prov.tokens + pool.tokens; - require(tokensProvisionTotal != 0, HorizonStakingInsufficientTokens(tokensProvisionTotal, tokens)); + require(tokensProvisionTotal != 0, HorizonStakingNoTokensToSlash()); uint256 tokensToSlash = MathUtils.min(tokens, tokensProvisionTotal); @@ -433,12 +443,8 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { // Burn remainder _graphToken().burnTokens(providerTokensSlashed - tokensVerifier); - // Provision accounting - uint256 provisionFractionSlashed = (providerTokensSlashed * FIXED_POINT_PRECISION + prov.tokens - 1) / - prov.tokens; - prov.tokensThawing = - (prov.tokensThawing * (FIXED_POINT_PRECISION - provisionFractionSlashed)) / - (FIXED_POINT_PRECISION); + // Provision accounting - round down, 1 wei max precision loss + prov.tokensThawing = (prov.tokensThawing * (prov.tokens - providerTokensSlashed)) / prov.tokens; prov.tokens = prov.tokens - providerTokensSlashed; // If the slashing leaves the thawing shares with no thawing tokens, cancel pending thawings by: @@ -469,13 +475,9 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { // Burn tokens _graphToken().burnTokens(tokensToSlash); - // Delegation pool accounting - uint256 delegationFractionSlashed = (tokensToSlash * FIXED_POINT_PRECISION + pool.tokens - 1) / - pool.tokens; + // Delegation pool accounting - round down, 1 wei max precision loss + pool.tokensThawing = (pool.tokensThawing * (pool.tokens - tokensToSlash)) / pool.tokens; pool.tokens = pool.tokens - tokensToSlash; - pool.tokensThawing = - (pool.tokensThawing * (FIXED_POINT_PRECISION - delegationFractionSlashed)) / - FIXED_POINT_PRECISION; // If the slashing leaves the thawing shares with no thawing tokens, cancel pending thawings by: // - deleting all thawing shares @@ -716,6 +718,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { createdAt: uint64(block.timestamp), maxVerifierCutPending: _maxVerifierCut, thawingPeriodPending: _thawingPeriod, + lastParametersStagedAt: 0, thawingNonce: 0 }); @@ -732,8 +735,9 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { * @param _tokens The amount of tokens to add to the 
provision */ function _addToProvision(address _serviceProvider, address _verifier, uint256 _tokens) private { - Provision storage prov = _provisions[_serviceProvider][_verifier]; require(_tokens != 0, HorizonStakingInvalidZeroTokens()); + + Provision storage prov = _provisions[_serviceProvider][_verifier]; require(prov.createdAt != 0, HorizonStakingInvalidProvision(_serviceProvider, _verifier)); uint256 tokensIdle = _getIdleStake(_serviceProvider); require(_tokens <= tokensIdle, HorizonStakingInsufficientIdleStake(_tokens, tokensIdle)); @@ -798,7 +802,8 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { /** * @notice Remove tokens from a provision and move them back to the service provider's idle stake. * @dev The parameter `nThawRequests` can be set to a non zero value to fulfill a specific number of thaw - * requests in the event that fulfilling all of them results in a gas limit error. + * requests in the event that fulfilling all of them results in a gas limit error. Otherwise, the function + * will attempt to fulfill all thaw requests until the first one that is not yet expired is found. * @param _serviceProvider The service provider address * @param _verifier The verifier address * @param _nThawRequests The number of thaw requests to fulfill. Set to 0 to fulfill all thaw requests. @@ -893,20 +898,12 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { * invalidated. * @dev Note that delegation that is caught thawing when the pool is invalidated will be completely lost! However delegation shares * that were not thawing will be preserved. - * @param _requestType The type of thaw request (Provision or Delegation). * @param _serviceProvider The service provider address * @param _verifier The verifier address * @param _shares The amount of shares to undelegate - * @param _beneficiary The beneficiary address * @return The ID of the thaw request */ - function _undelegate( - ThawRequestType _requestType, - address _serviceProvider, - address _verifier, - uint256 _shares, - address _beneficiary - ) private returns (bytes32) { + function _undelegate(address _serviceProvider, address _verifier, uint256 _shares) private returns (bytes32) { require(_shares > 0, HorizonStakingInvalidZeroShares()); DelegationPoolInternal storage pool = _getDelegationPool(_serviceProvider, _verifier); DelegationInternal storage delegation = pool.delegators[msg.sender]; @@ -938,26 +935,26 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { } bytes32 thawRequestId = _createThawRequest( - _requestType, + ThawRequestType.Delegation, _serviceProvider, _verifier, - _beneficiary, + msg.sender, thawingShares, thawingUntil, pool.thawingNonce ); - emit TokensUndelegated(_serviceProvider, _verifier, msg.sender, tokens); + emit TokensUndelegated(_serviceProvider, _verifier, msg.sender, tokens, _shares); return thawRequestId; } /** * @notice Withdraw undelegated tokens from a provision after thawing. * @dev The parameter `nThawRequests` can be set to a non zero value to fulfill a specific number of thaw - * requests in the event that fulfilling all of them results in a gas limit error. + * requests in the event that fulfilling all of them results in a gas limit error. Otherwise, the function + * will attempt to fulfill all thaw requests until the first one that is not yet expired is found. * @dev If the delegation pool was completely slashed before withdrawing, calling this function will fulfill * the thaw requests with an amount equal to zero. 
- * @param _requestType The type of thaw request (Provision or Delegation). * @param _serviceProvider The service provider address * @param _verifier The verifier address * @param _newServiceProvider The new service provider address @@ -966,7 +963,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { * @param _nThawRequests The number of thaw requests to fulfill. Set to 0 to fulfill all thaw requests. */ function _withdrawDelegated( - ThawRequestType _requestType, address _serviceProvider, address _verifier, address _newServiceProvider, @@ -987,7 +983,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { uint256 tokensThawing = pool.tokensThawing; FulfillThawRequestsParams memory params = FulfillThawRequestsParams({ - requestType: _requestType, + requestType: ThawRequestType.Delegation, serviceProvider: _serviceProvider, verifier: _verifier, owner: msg.sender, @@ -1050,10 +1046,10 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { ThawRequest storage thawRequest = _getThawRequest(_requestType, thawRequestId); thawRequest.shares = _shares; thawRequest.thawingUntil = _thawingUntil; - thawRequest.next = bytes32(0); + thawRequest.nextRequest = bytes32(0); thawRequest.thawingNonce = _thawingNonce; - if (thawRequestList.count != 0) _getThawRequest(_requestType, thawRequestList.tail).next = thawRequestId; + if (thawRequestList.count != 0) _getThawRequest(_requestType, thawRequestList.tail).nextRequest = thawRequestId; thawRequestList.addTail(thawRequestId); emit ThawRequestCreated( @@ -1063,13 +1059,17 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { _owner, _shares, _thawingUntil, - thawRequestId + thawRequestId, + _thawingNonce ); return thawRequestId; } /** * @notice Traverses a thaw request list and fulfills expired thaw requests. + * @dev Note that the list is traversed by creation date not by thawing until date. Traversing will stop + * when the first thaw request that is not yet expired is found even if later thaw requests have expired. This + * could happen for example when the thawing period is shortened. 
* @param _params The parameters for fulfilling thaw requests * @return The amount of thawed tokens * @return The amount of tokens still thawing @@ -1171,6 +1171,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { uint256 tokens = 0; bool validThawRequest = thawRequest.thawingNonce == thawingNonce; if (validThawRequest) { + // sharesThawing cannot be zero if there is a valid thaw request so the next division is safe tokens = (thawRequest.shares * tokensThawing) / sharesThawing; tokensThawing = tokensThawing - tokens; sharesThawing = sharesThawing - thawRequest.shares; diff --git a/packages/horizon/contracts/staking/HorizonStakingBase.sol b/packages/horizon/contracts/staking/HorizonStakingBase.sol index 5ccbddfa2..3b0f29065 100644 --- a/packages/horizon/contracts/staking/HorizonStakingBase.sol +++ b/packages/horizon/contracts/staking/HorizonStakingBase.sol @@ -182,15 +182,19 @@ abstract contract HorizonStakingBase is bytes32 thawRequestId = thawRequestList.head; while (thawRequestId != bytes32(0)) { ThawRequest storage thawRequest = _getThawRequest(requestType, thawRequestId); - if (thawRequest.thawingUntil <= block.timestamp) { - uint256 tokens = (thawRequest.shares * tokensThawing) / sharesThawing; - tokensThawing = tokensThawing - tokens; - sharesThawing = sharesThawing - thawRequest.shares; - thawedTokens = thawedTokens + tokens; - } else { - break; + if (thawRequest.thawingNonce == prov.thawingNonce) { + if (thawRequest.thawingUntil <= block.timestamp) { + // sharesThawing cannot be zero if there is a valid thaw request so the next division is safe + uint256 tokens = (thawRequest.shares * tokensThawing) / sharesThawing; + tokensThawing = tokensThawing - tokens; + sharesThawing = sharesThawing - thawRequest.shares; + thawedTokens = thawedTokens + tokens; + } else { + break; + } } - thawRequestId = thawRequest.next; + + thawRequestId = thawRequest.nextRequest; } return thawedTokens; } @@ -276,7 +280,7 @@ abstract contract HorizonStakingBase is * @return The ID of the next thaw request in the list. */ function _getNextProvisionThawRequest(bytes32 _thawRequestId) internal view returns (bytes32) { - return _thawRequests[ThawRequestType.Provision][_thawRequestId].next; + return _thawRequests[ThawRequestType.Provision][_thawRequestId].nextRequest; } /** @@ -285,7 +289,7 @@ abstract contract HorizonStakingBase is * @return The ID of the next thaw request in the list. */ function _getNextDelegationThawRequest(bytes32 _thawRequestId) internal view returns (bytes32) { - return _thawRequests[ThawRequestType.Delegation][_thawRequestId].next; + return _thawRequests[ThawRequestType.Delegation][_thawRequestId].nextRequest; } /** diff --git a/packages/horizon/contracts/staking/HorizonStakingExtension.sol b/packages/horizon/contracts/staking/HorizonStakingExtension.sol index 00b9c6048..0bfe54a8b 100644 --- a/packages/horizon/contracts/staking/HorizonStakingExtension.sol +++ b/packages/horizon/contracts/staking/HorizonStakingExtension.sol @@ -184,6 +184,18 @@ contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension } } + // Slashing tokens that are already provisioned would break provision accounting, so we need to limit + // the slash amount. This can be compensated for by slashing with the main slash function if needed. 
+ uint256 slashableStake = indexerStake.tokensStaked - indexerStake.tokensProvisioned; + if (slashableStake == 0) { + emit StakeSlashed(indexer, 0, 0, beneficiary); + return; + } + if (tokens > slashableStake) { + reward = (reward * slashableStake) / tokens; + tokens = slashableStake; + } + // Remove tokens to slash from the stake indexerStake.tokensStaked = indexerStake.tokensStaked - tokens; @@ -211,9 +223,10 @@ contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension /// @inheritdoc IRewardsIssuer function getAllocationData( address allocationID - ) external view override returns (address, bytes32, uint256, uint256, uint256) { + ) external view override returns (bool, address, bytes32, uint256, uint256, uint256) { Allocation memory allo = __DEPRECATED_allocations[allocationID]; - return (allo.indexer, allo.subgraphDeploymentID, allo.tokens, allo.accRewardsPerAllocatedToken, 0); + bool isActive = _getAllocationState(allocationID) == AllocationState.Active; + return (isActive, allo.indexer, allo.subgraphDeploymentID, allo.tokens, allo.accRewardsPerAllocatedToken, 0); } /// @inheritdoc IHorizonStakingExtension @@ -266,10 +279,9 @@ contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension /** * @dev Triggers an update of rewards due to a change in allocations. * @param _subgraphDeploymentID Subgraph deployment updated - * @return Accumulated rewards per allocated token for the subgraph deployment */ - function _updateRewards(bytes32 _subgraphDeploymentID) private returns (uint256) { - return _graphRewardsManager().onSubgraphAllocationUpdate(_subgraphDeploymentID); + function _updateRewards(bytes32 _subgraphDeploymentID) private { + _graphRewardsManager().onSubgraphAllocationUpdate(_subgraphDeploymentID); } /** @@ -339,9 +351,6 @@ contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension require(isIndexerOrOperator, "!auth"); } - // Close the allocation - __DEPRECATED_allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; - // -- Rewards Distribution -- // Process non-zero-allocation rewards tracking @@ -365,6 +374,11 @@ contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension alloc.tokens; } + // Close the allocation + // Note that this breaks CEI pattern. We update after the rewards distribution logic as it expects the allocation + // to still be active. There shouldn't be reentrancy risk here as all internal calls are to trusted contracts. + __DEPRECATED_allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; + emit AllocationClosed( alloc.indexer, alloc.subgraphDeploymentID, diff --git a/packages/horizon/contracts/staking/HorizonStakingStorage.sol b/packages/horizon/contracts/staking/HorizonStakingStorage.sol index 15ea0cce0..f06ff5cb6 100644 --- a/packages/horizon/contracts/staking/HorizonStakingStorage.sol +++ b/packages/horizon/contracts/staking/HorizonStakingStorage.sol @@ -137,6 +137,10 @@ abstract contract HorizonStakingV1Storage { // -- Horizon Staking -- /// @dev Maximum thawing period, in seconds, for a provision + /// Note that to protect delegation from being unfairly locked this should be set to a sufficiently low value + /// Additionally note that setting this to a high enough value could lead to overflow when calculating thawing until + /// dates. For practical purposes this should not be an issue but we recommend using a value like 1e18 to represent + /// "infinite thawing" if that is the intent. 
uint64 internal _maxThawingPeriod; /// @dev Provisions from each service provider for each data service diff --git a/packages/horizon/contracts/staking/utilities/Managed.sol b/packages/horizon/contracts/staking/utilities/Managed.sol index c1b466870..b2e36056f 100644 --- a/packages/horizon/contracts/staking/utilities/Managed.sol +++ b/packages/horizon/contracts/staking/utilities/Managed.sol @@ -50,14 +50,6 @@ abstract contract Managed is GraphDirectory { _; } - /** - * @dev Revert if the caller is not the Controller - */ - modifier onlyController() { - require(msg.sender == address(_graphController()), ManagedOnlyController()); - _; - } - /** * @dev Revert if the caller is not the governor */ diff --git a/packages/horizon/test/data-service/DataService.t.sol b/packages/horizon/test/data-service/DataService.t.sol index 4a29a523f..7cf125364 100644 --- a/packages/horizon/test/data-service/DataService.t.sol +++ b/packages/horizon/test/data-service/DataService.t.sol @@ -20,7 +20,7 @@ contract DataServiceTest is HorizonStakingSharedTest { } function test_Constructor_WhenTheContractIsDeployedWithAValidController() external view { - _assert_delegationRatio(type(uint32).min); + _assert_delegationRatio(type(uint32).max); _assert_provisionTokens_range(type(uint256).min, type(uint256).max); _assert_verifierCut_range(type(uint32).min, uint32(PPMMath.MAX_PPM)); _assert_thawingPeriod_range(type(uint64).min, type(uint64).max); diff --git a/packages/horizon/test/data-service/DataServiceUpgradeable.t.sol b/packages/horizon/test/data-service/DataServiceUpgradeable.t.sol index be33173f8..b25953b5e 100644 --- a/packages/horizon/test/data-service/DataServiceUpgradeable.t.sol +++ b/packages/horizon/test/data-service/DataServiceUpgradeable.t.sol @@ -14,7 +14,7 @@ contract DataServiceUpgradeableTest is GraphBaseTest { // via proxy - ensure that the proxy was initialized correctly // these calls validate proxy storage was correctly initialized uint32 delegationRatio = dataService.getDelegationRatio(); - assertEq(delegationRatio, type(uint32).min); + assertEq(delegationRatio, type(uint32).max); (uint256 minTokens, uint256 maxTokens) = dataService.getProvisionTokensRange(); assertEq(minTokens, type(uint256).min); diff --git a/packages/horizon/test/data-service/extensions/DataServicePausableUpgradeable.t.sol b/packages/horizon/test/data-service/extensions/DataServicePausableUpgradeable.t.sol index 6e58810c1..d52732ba3 100644 --- a/packages/horizon/test/data-service/extensions/DataServicePausableUpgradeable.t.sol +++ b/packages/horizon/test/data-service/extensions/DataServicePausableUpgradeable.t.sol @@ -17,7 +17,7 @@ contract DataServicePausableUpgradeableTest is GraphBaseTest { // via proxy - ensure that the proxy was initialized correctly // these calls validate proxy storage was correctly initialized uint32 delegationRatio = dataService.getDelegationRatio(); - assertEq(delegationRatio, type(uint32).min); + assertEq(delegationRatio, type(uint32).max); (uint256 minTokens, uint256 maxTokens) = dataService.getProvisionTokensRange(); assertEq(minTokens, type(uint256).min); diff --git a/packages/horizon/test/shared/horizon-staking/HorizonStakingShared.t.sol b/packages/horizon/test/shared/horizon-staking/HorizonStakingShared.t.sol index 86817c144..20f824e38 100644 --- a/packages/horizon/test/shared/horizon-staking/HorizonStakingShared.t.sol +++ b/packages/horizon/test/shared/horizon-staking/HorizonStakingShared.t.sol @@ -181,6 +181,62 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ); } + function 
_stakeToProvision(address serviceProvider, address verifier, uint256 tokens) internal { + (, address msgSender, ) = vm.readCallers(); + + // before + uint256 beforeStakingBalance = token.balanceOf(address(staking)); + uint256 beforeSenderBalance = token.balanceOf(msgSender); + ServiceProviderInternal memory beforeServiceProvider = _getStorage_ServiceProviderInternal(serviceProvider); + Provision memory beforeProvision = staking.getProvision(serviceProvider, verifier); + + // stakeTo + token.approve(address(staking), tokens); + vm.expectEmit(); + emit IHorizonStakingBase.HorizonStakeDeposited(serviceProvider, tokens); + vm.expectEmit(); + emit IHorizonStakingMain.ProvisionIncreased(serviceProvider, verifier, tokens); + staking.stakeToProvision(serviceProvider, verifier, tokens); + + // after + uint256 afterStakingBalance = token.balanceOf(address(staking)); + uint256 afterSenderBalance = token.balanceOf(msgSender); + ServiceProviderInternal memory afterServiceProvider = _getStorage_ServiceProviderInternal(serviceProvider); + Provision memory afterProvision = staking.getProvision(serviceProvider, verifier); + + // assert - stakeTo + assertEq(afterStakingBalance, beforeStakingBalance + tokens); + assertEq(afterSenderBalance, beforeSenderBalance - tokens); + assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked + tokens); + assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned + tokens); + assertEq(afterServiceProvider.__DEPRECATED_tokensAllocated, beforeServiceProvider.__DEPRECATED_tokensAllocated); + assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, beforeServiceProvider.__DEPRECATED_tokensLocked); + assertEq( + afterServiceProvider.__DEPRECATED_tokensLockedUntil, + beforeServiceProvider.__DEPRECATED_tokensLockedUntil + ); + + // assert - addToProvision + assertEq(afterProvision.tokens, beforeProvision.tokens + tokens); + assertEq(afterProvision.tokensThawing, beforeProvision.tokensThawing); + assertEq(afterProvision.sharesThawing, beforeProvision.sharesThawing); + assertEq(afterProvision.maxVerifierCut, beforeProvision.maxVerifierCut); + assertEq(afterProvision.thawingPeriod, beforeProvision.thawingPeriod); + assertEq(afterProvision.createdAt, beforeProvision.createdAt); + assertEq(afterProvision.lastParametersStagedAt, beforeProvision.lastParametersStagedAt); + assertEq(afterProvision.maxVerifierCutPending, beforeProvision.maxVerifierCutPending); + assertEq(afterProvision.thawingPeriodPending, beforeProvision.thawingPeriodPending); + assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); + assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked + tokens); + assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned + tokens); + assertEq(afterServiceProvider.__DEPRECATED_tokensAllocated, beforeServiceProvider.__DEPRECATED_tokensAllocated); + assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, beforeServiceProvider.__DEPRECATED_tokensLocked); + assertEq( + afterServiceProvider.__DEPRECATED_tokensLockedUntil, + beforeServiceProvider.__DEPRECATED_tokensLockedUntil + ); + } + function _unstake(uint256 _tokens) internal { (, address msgSender, ) = vm.readCallers(); @@ -355,6 +411,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.createdAt, uint64(block.timestamp)); assertEq(afterProvision.maxVerifierCutPending, maxVerifierCut); assertEq(afterProvision.thawingPeriodPending, thawingPeriod); + 
assertEq(afterProvision.lastParametersStagedAt, 0); assertEq(afterProvision.thawingNonce, 0); assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked); assertEq(afterServiceProvider.tokensProvisioned, tokens + beforeServiceProvider.tokensProvisioned); @@ -387,6 +444,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.maxVerifierCut, beforeProvision.maxVerifierCut); assertEq(afterProvision.thawingPeriod, beforeProvision.thawingPeriod); assertEq(afterProvision.createdAt, beforeProvision.createdAt); + assertEq(afterProvision.lastParametersStagedAt, beforeProvision.lastParametersStagedAt); assertEq(afterProvision.maxVerifierCutPending, beforeProvision.maxVerifierCutPending); assertEq(afterProvision.thawingPeriodPending, beforeProvision.thawingPeriodPending); assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); @@ -426,7 +484,8 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { serviceProvider, thawingShares, uint64(block.timestamp + beforeProvision.thawingPeriod), - expectedThawRequestId + expectedThawRequestId, + beforeProvision.thawingNonce ); vm.expectEmit(address(staking)); emit IHorizonStakingMain.ProvisionThawed(serviceProvider, verifier, tokens); @@ -461,11 +520,12 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.createdAt, beforeProvision.createdAt); assertEq(afterProvision.maxVerifierCutPending, beforeProvision.maxVerifierCutPending); assertEq(afterProvision.thawingPeriodPending, beforeProvision.thawingPeriodPending); + assertEq(afterProvision.lastParametersStagedAt, beforeProvision.lastParametersStagedAt); assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); assertEq(thawRequestId, expectedThawRequestId); assertEq(afterThawRequest.shares, thawingShares); assertEq(afterThawRequest.thawingUntil, block.timestamp + beforeProvision.thawingPeriod); - assertEq(afterThawRequest.next, bytes32(0)); + assertEq(afterThawRequest.nextRequest, bytes32(0)); assertEq( afterThawRequestList.head, beforeThawRequestList.count == 0 ? 
thawRequestId : beforeThawRequestList.head @@ -474,7 +534,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterThawRequestList.count, beforeThawRequestList.count + 1); assertEq(afterThawRequestList.nonce, beforeThawRequestList.nonce + 1); if (beforeThawRequestList.count != 0) { - assertEq(afterPreviousTailThawRequest.next, thawRequestId); + assertEq(afterPreviousTailThawRequest.nextRequest, thawRequestId); } return thawRequestId; @@ -546,6 +606,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.createdAt, beforeProvision.createdAt); assertEq(afterProvision.maxVerifierCutPending, beforeProvision.maxVerifierCutPending); assertEq(afterProvision.thawingPeriodPending, beforeProvision.thawingPeriodPending); + assertEq(afterProvision.lastParametersStagedAt, beforeProvision.lastParametersStagedAt); assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked); assertEq( @@ -565,7 +626,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ); assertEq(thawRequest.shares, 0); assertEq(thawRequest.thawingUntil, 0); - assertEq(thawRequest.next, bytes32(0)); + assertEq(thawRequest.nextRequest, bytes32(0)); } if (calcValues.thawRequestsFulfilledList.length == 0) { assertEq(afterThawRequestList.head, beforeThawRequestList.head); @@ -574,7 +635,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { afterThawRequestList.head, calcValues.thawRequestsFulfilledList.length == beforeThawRequestList.count ? bytes32(0) - : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].next + : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].nextRequest ); } assertEq( @@ -672,6 +733,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.createdAt, beforeValues.provision.createdAt); assertEq(afterProvision.maxVerifierCutPending, beforeValues.provision.maxVerifierCutPending); assertEq(afterProvision.thawingPeriodPending, beforeValues.provision.thawingPeriodPending); + assertEq(afterProvision.lastParametersStagedAt, beforeValues.provision.lastParametersStagedAt); assertEq(afterProvision.thawingNonce, beforeValues.provision.thawingNonce); // assert: provision new verifier @@ -718,7 +780,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ); assertEq(thawRequest.shares, 0); assertEq(thawRequest.thawingUntil, 0); - assertEq(thawRequest.next, bytes32(0)); + assertEq(thawRequest.nextRequest, bytes32(0)); } if (calcValues.thawRequestsFulfilledList.length == 0) { assertEq(afterThawRequestList.head, beforeValues.thawRequestList.head); @@ -727,7 +789,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { afterThawRequestList.head, calcValues.thawRequestsFulfilledList.length == beforeValues.thawRequestList.count ? 
bytes32(0) - : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].next + : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].nextRequest ); } assertEq( @@ -753,7 +815,9 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { Provision memory beforeProvision = staking.getProvision(serviceProvider, verifier); // setProvisionParameters - if (beforeProvision.maxVerifierCut != maxVerifierCut || beforeProvision.thawingPeriod != thawingPeriod) { + bool paramsChanged = beforeProvision.maxVerifierCut != maxVerifierCut || + beforeProvision.thawingPeriod != thawingPeriod; + if (paramsChanged) { vm.expectEmit(); emit IHorizonStakingMain.ProvisionParametersStaged( serviceProvider, @@ -776,6 +840,10 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.createdAt, beforeProvision.createdAt); assertEq(afterProvision.maxVerifierCutPending, maxVerifierCut); assertEq(afterProvision.thawingPeriodPending, thawingPeriod); + assertEq( + afterProvision.lastParametersStagedAt, + paramsChanged ? block.timestamp : beforeProvision.lastParametersStagedAt + ); assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); } @@ -812,6 +880,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterProvision.thawingPeriod, beforeProvision.thawingPeriodPending); assertEq(afterProvision.thawingPeriod, afterProvision.thawingPeriodPending); assertEq(afterProvision.createdAt, beforeProvision.createdAt); + assertEq(afterProvision.lastParametersStagedAt, beforeProvision.lastParametersStagedAt); assertEq(afterProvision.thawingNonce, beforeProvision.thawingNonce); } @@ -1007,10 +1076,11 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { beneficiary, calcValues.thawingShares, calcValues.thawingUntil, - calcValues.thawRequestId + calcValues.thawRequestId, + beforeValues.pool.thawingNonce ); vm.expectEmit(); - emit IHorizonStakingMain.TokensUndelegated(serviceProvider, verifier, delegator, calcValues.tokens); + emit IHorizonStakingMain.TokensUndelegated(serviceProvider, verifier, delegator, calcValues.tokens, shares); if (legacy) { staking.undelegate(serviceProvider, shares); } else if (thawRequestType == IHorizonStakingTypes.ThawRequestType.Delegation) { @@ -1054,7 +1124,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(beforeValues.delegation.shares - shares, afterDelegation.shares); assertEq(afterThawRequest.shares, calcValues.thawingShares); assertEq(afterThawRequest.thawingUntil, calcValues.thawingUntil); - assertEq(afterThawRequest.next, bytes32(0)); + assertEq(afterThawRequest.nextRequest, bytes32(0)); assertEq(calcValues.thawRequestId, afterThawRequestList.tail); assertEq(beforeValues.thawRequestList.nonce + 1, afterThawRequestList.nonce); assertEq(beforeValues.thawRequestList.count + 1, afterThawRequestList.count); @@ -1259,7 +1329,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ); assertEq(thawRequest.shares, 0); assertEq(thawRequest.thawingUntil, 0); - assertEq(thawRequest.next, bytes32(0)); + assertEq(thawRequest.nextRequest, bytes32(0)); } if (calcValues.thawRequestsFulfilledList.length == 0) { assertEq(afterValues.thawRequestList.head, beforeValues.thawRequestList.head); @@ -1268,7 +1338,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { afterValues.thawRequestList.head, calcValues.thawRequestsFulfilledList.length == beforeValues.thawRequestList.count ? 
bytes32(0) - : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].next + : calcValues.thawRequestsFulfilledList[calcValues.thawRequestsFulfilledList.length - 1].nextRequest ); } assertEq( @@ -1511,9 +1581,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { uint256 tokensSlashed = calcValues.providerTokensSlashed + (isDelegationSlashingEnabled ? calcValues.delegationTokensSlashed : 0); uint256 provisionThawingTokens = (before.provision.tokensThawing * - (1e18 - - ((calcValues.providerTokensSlashed * 1e18 + before.provision.tokens - 1) / - before.provision.tokens))) / (1e18); + (before.provision.tokens - calcValues.providerTokensSlashed)) / before.provision.tokens; // assert assertEq(afterProvision.tokens + calcValues.providerTokensSlashed, before.provision.tokens); @@ -1534,9 +1602,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ); if (isDelegationSlashingEnabled) { uint256 poolThawingTokens = (before.pool.tokensThawing * - (1e18 - - ((calcValues.delegationTokensSlashed * 1e18 + before.pool.tokens - 1) / before.pool.tokens))) / - (1e18); + (before.pool.tokens - calcValues.delegationTokensSlashed)) / before.pool.tokens; assertEq(afterPool.tokens + calcValues.delegationTokensSlashed, before.pool.tokens); assertEq(afterPool.shares, before.pool.shares); assertEq(afterPool.tokensThawing, poolThawingTokens); @@ -2371,7 +2437,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { } else { break; } - thawRequestId = thawRequest.next; + thawRequestId = thawRequest.nextRequest; } // we need to do a second pass because solidity doesnt allow dynamic arrays on memory @@ -2398,12 +2464,12 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { } thawRequestData.thawRequestsFulfilledListIds[i] = thawRequestId; thawRequestData.thawRequestsFulfilledList[i] = _getThawRequest(params.thawRequestType, thawRequestId); - thawRequestId = thawRequestData.thawRequestsFulfilledList[i].next; + thawRequestId = thawRequestData.thawRequestsFulfilledList[i].nextRequest; i++; } else { break; } - thawRequestId = thawRequest.next; + thawRequestId = thawRequest.nextRequest; } assertEq(thawRequestsFulfilled, thawRequestData.thawRequestsFulfilledList.length); diff --git a/packages/horizon/test/staking/provision/parameters.t.sol b/packages/horizon/test/staking/provision/parameters.t.sol index 6e0b4fabf..b29fc32bb 100644 --- a/packages/horizon/test/staking/provision/parameters.t.sol +++ b/packages/horizon/test/staking/provision/parameters.t.sol @@ -70,6 +70,47 @@ contract HorizonStakingProvisionParametersTest is HorizonStakingTest { staking.setProvisionParameters(users.indexer, subgraphDataServiceAddress, maxVerifierCut, thawingPeriod); } + function test_ProvisionParametersSet_MaxMaxThawingPeriodChanged( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod + ) public useIndexer { + vm.assume(amount > 0); + vm.assume(amount <= MAX_STAKING_TOKENS); + vm.assume(maxVerifierCut <= MAX_PPM); + + // create provision with initial parameters + uint32 initialMaxVerifierCut = 1000; + uint64 initialThawingPeriod = 14 days; // Max thawing period is 28 days + _createProvision( + users.indexer, + subgraphDataServiceAddress, + amount, + initialMaxVerifierCut, + initialThawingPeriod + ); + + // change the max thawing period allowed so that the initial thawing period is not valid anymore + uint64 newMaxThawingPeriod = 7 days; + resetPrank(users.governor); + _setMaxThawingPeriod(newMaxThawingPeriod); + + // set the verifier cut to a new value - 
keep the thawing period the same, it should be allowed + resetPrank(users.indexer); + _setProvisionParameters(users.indexer, subgraphDataServiceAddress, maxVerifierCut, initialThawingPeriod); + + // now try to change the thawing period to a new value that is invalid + vm.assume(thawingPeriod > initialThawingPeriod); + vm.expectRevert( + abi.encodeWithSelector( + IHorizonStakingMain.HorizonStakingInvalidThawingPeriod.selector, + thawingPeriod, + newMaxThawingPeriod + ) + ); + staking.setProvisionParameters(users.indexer, subgraphDataServiceAddress, maxVerifierCut, thawingPeriod); + } + function test_ProvisionParametersAccept( uint256 amount, uint32 maxVerifierCut, diff --git a/packages/horizon/test/staking/provision/provision.t.sol b/packages/horizon/test/staking/provision/provision.t.sol index ba580be25..c87e13a45 100644 --- a/packages/horizon/test/staking/provision/provision.t.sol +++ b/packages/horizon/test/staking/provision/provision.t.sol @@ -80,19 +80,6 @@ contract HorizonStakingProvisionTest is HorizonStakingTest { staking.provision(users.indexer, subgraphDataServiceAddress, amount / 2, maxVerifierCut, thawingPeriod); } - function testProvision_OperatorAddTokensToProvision( - uint256 amount, - uint32 maxVerifierCut, - uint64 thawingPeriod, - uint256 tokensToAdd - ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) useOperator { - tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); - - // Add more tokens to the provision - _stakeTo(users.indexer, tokensToAdd); - _addToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); - } - function testProvision_RevertWhen_OperatorNotAuthorized( uint256 amount, uint32 maxVerifierCut, @@ -124,4 +111,113 @@ contract HorizonStakingProvisionTest is HorizonStakingTest { vm.expectRevert(expectedError); staking.provision(users.indexer, subgraphDataServiceAddress, amount, 0, 0); } + + function testProvision_AddTokensToProvision( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + _stakeTo(users.indexer, tokensToAdd); + _addToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } + + function testProvision_OperatorAddTokensToProvision( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) useOperator { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + _stakeTo(users.indexer, tokensToAdd); + _addToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } + + function testProvision_AddTokensToProvision_RevertWhen_NotAuthorized( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + _stakeTo(users.indexer, tokensToAdd); + + // use delegator as a non authorized operator + vm.startPrank(users.delegator); + bytes memory expectedError = abi.encodeWithSignature( + "HorizonStakingNotAuthorized(address,address,address)", + users.indexer, + subgraphDataServiceAddress, + users.delegator + ); + vm.expectRevert(expectedError); + staking.addToProvision(users.indexer, subgraphDataServiceAddress, amount); + } + + function 
testProvision_StakeToProvision( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + _stakeToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } + + function testProvision_Operator_StakeToProvision( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) useOperator { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + _stakeToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } + + function testProvision_Verifier_StakeToProvision( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Ensure the verifier has enough tokens to then stake to the provision + token.transfer(subgraphDataServiceAddress, tokensToAdd); + + // Add more tokens to the provision + resetPrank(subgraphDataServiceAddress); + _stakeToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } + + function testProvision_StakeToProvision_RevertWhen_NotAuthorized( + uint256 amount, + uint32 maxVerifierCut, + uint64 thawingPeriod, + uint256 tokensToAdd + ) public useIndexer useProvision(amount, maxVerifierCut, thawingPeriod) { + tokensToAdd = bound(tokensToAdd, 1, MAX_STAKING_TOKENS); + + // Add more tokens to the provision + vm.startPrank(users.delegator); + bytes memory expectedError = abi.encodeWithSignature( + "HorizonStakingNotAuthorized(address,address,address)", + users.indexer, + subgraphDataServiceAddress, + users.delegator + ); + vm.expectRevert(expectedError); + staking.stakeToProvision(users.indexer, subgraphDataServiceAddress, tokensToAdd); + } } diff --git a/packages/horizon/test/staking/provision/thaw.t.sol b/packages/horizon/test/staking/provision/thaw.t.sol index a16dd7253..46c666618 100644 --- a/packages/horizon/test/staking/provision/thaw.t.sol +++ b/packages/horizon/test/staking/provision/thaw.t.sol @@ -162,4 +162,76 @@ contract HorizonStakingThawTest is HorizonStakingTest { ); vm.assertEq(thawedTokens, thawAmount * thawSteps); } + + function testThaw_GetThawedTokens_AfterProvisionFullySlashed( + uint256 amount, + uint64 thawingPeriod, + uint256 thawAmount + ) public useIndexer useProvision(amount, 0, thawingPeriod) { + // thaw some funds so there are some shares thawing and tokens thawing + thawAmount = bound(thawAmount, 1, amount); + _thaw(users.indexer, subgraphDataServiceAddress, thawAmount); + + // skip to after the thawing period has passed + skip(thawingPeriod + 1); + + // slash all of it + resetPrank(subgraphDataServiceAddress); + _slash(users.indexer, subgraphDataServiceAddress, amount, 0); + + // get the thawed tokens - should be zero now as the pool was reset + uint256 thawedTokens = staking.getThawedTokens( + ThawRequestType.Provision, + users.indexer, + subgraphDataServiceAddress, + users.indexer + ); + vm.assertEq(thawedTokens, 0); + } + + function testThaw_GetThawedTokens_AfterRecoveringProvision( + uint256 amount, + uint64 thawingPeriod, + uint256 thawAmount + ) public useIndexer useProvision(amount, 0, thawingPeriod) { + // thaw some funds so there are some shares thawing and tokens thawing + thawAmount = 
bound(thawAmount, 1, amount); + _thaw(users.indexer, subgraphDataServiceAddress, thawAmount); + + // skip to after the thawing period has passed + skip(thawingPeriod + 1); + + // slash all of it + resetPrank(subgraphDataServiceAddress); + _slash(users.indexer, subgraphDataServiceAddress, amount, 0); + + // get the thawed tokens - should be zero now as the pool was reset + uint256 thawedTokens = staking.getThawedTokens( + ThawRequestType.Provision, + users.indexer, + subgraphDataServiceAddress, + users.indexer + ); + vm.assertEq(thawedTokens, 0); + + // put some funds back in + resetPrank(users.indexer); + _stake(amount); + _addToProvision(users.indexer, subgraphDataServiceAddress, amount); + + // thaw some more funds + _thaw(users.indexer, subgraphDataServiceAddress, thawAmount); + + // skip to after the thawing period has passed + skip(block.timestamp + thawingPeriod + 1); + + // get the thawed tokens - should be the amount we thawed + thawedTokens = staking.getThawedTokens( + ThawRequestType.Provision, + users.indexer, + subgraphDataServiceAddress, + users.indexer + ); + vm.assertEq(thawedTokens, thawAmount); + } } diff --git a/packages/horizon/test/staking/slash/legacySlash.t.sol b/packages/horizon/test/staking/slash/legacySlash.t.sol index abb587404..338f84bb6 100644 --- a/packages/horizon/test/staking/slash/legacySlash.t.sol +++ b/packages/horizon/test/staking/slash/legacySlash.t.sol @@ -45,34 +45,53 @@ contract HorizonStakingLegacySlashTest is HorizonStakingTest { // before uint256 beforeStakingBalance = token.balanceOf(address(staking)); uint256 beforeRewardsDestinationBalance = token.balanceOf(_beneficiary); + ServiceProviderInternal memory beforeIndexer = _getStorage_ServiceProviderInternal(_indexer); + + // calculate slashable stake + uint256 slashableStake = beforeIndexer.tokensStaked - beforeIndexer.tokensProvisioned; + uint256 actualTokens = _tokens; + uint256 actualRewards = _rewards; + if (slashableStake == 0) { + actualTokens = 0; + actualRewards = 0; + } else if (_tokens > slashableStake) { + actualRewards = (_rewards * slashableStake) / _tokens; + actualTokens = slashableStake; + } // slash vm.expectEmit(address(staking)); - emit IHorizonStakingExtension.StakeSlashed(_indexer, _tokens, _rewards, _beneficiary); + emit IHorizonStakingExtension.StakeSlashed(_indexer, actualTokens, actualRewards, _beneficiary); staking.slash(_indexer, _tokens, _rewards, _beneficiary); // after uint256 afterStakingBalance = token.balanceOf(address(staking)); uint256 afterRewardsDestinationBalance = token.balanceOf(_beneficiary); + ServiceProviderInternal memory afterIndexer = _getStorage_ServiceProviderInternal(_indexer); - assertEq(beforeStakingBalance - _tokens, afterStakingBalance); - assertEq(beforeRewardsDestinationBalance, afterRewardsDestinationBalance - _rewards); + assertEq(beforeStakingBalance - actualTokens, afterStakingBalance); + assertEq(beforeRewardsDestinationBalance, afterRewardsDestinationBalance - actualRewards); + assertEq(afterIndexer.tokensStaked, beforeIndexer.tokensStaked - actualTokens); } /* * TESTS */ - function testSlash_Legacy( - uint256 tokens, + uint256 tokensStaked, + uint256 tokensProvisioned, uint256 slashTokens, uint256 reward ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 1); - slashTokens = bound(slashTokens, 1, tokens); + vm.assume(tokensStaked > 0); + vm.assume(tokensStaked <= MAX_STAKING_TOKENS); + vm.assume(tokensProvisioned > 0); + vm.assume(tokensProvisioned <= tokensStaked); + slashTokens = bound(slashTokens, 1, 
tokensStaked); reward = bound(reward, 0, slashTokens); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); + _stake(tokensStaked); + _provision(users.indexer, subgraphDataServiceLegacyAddress, tokensProvisioned, 0, 0); resetPrank(users.legacySlasher); _legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); diff --git a/packages/horizon/test/staking/slash/slash.t.sol b/packages/horizon/test/staking/slash/slash.t.sol index 3f67a9e59..e1aad1336 100644 --- a/packages/horizon/test/staking/slash/slash.t.sol +++ b/packages/horizon/test/staking/slash/slash.t.sol @@ -96,11 +96,7 @@ contract HorizonStakingSlashTest is HorizonStakingTest { function testSlash_RevertWhen_NoProvision(uint256 tokens, uint256 slashTokens) public useIndexer useStake(tokens) { vm.assume(slashTokens > 0); - bytes memory expectedError = abi.encodeWithSelector( - IHorizonStakingMain.HorizonStakingInsufficientTokens.selector, - 0, - slashTokens - ); + bytes memory expectedError = abi.encodeWithSelector(IHorizonStakingMain.HorizonStakingNoTokensToSlash.selector); vm.expectRevert(expectedError); vm.startPrank(subgraphDataServiceAddress); staking.slash(users.indexer, slashTokens, 0, subgraphDataServiceAddress); @@ -141,45 +137,57 @@ contract HorizonStakingSlashTest is HorizonStakingTest { _slash(users.indexer, subgraphDataServiceAddress, tokens + delegationTokens, 0); } - function testSlash_RoundDown_TokensThawing_Provision() public useIndexer { - uint256 tokens = 1 ether + 1; + function testSlash_RoundDown_TokensThawing_Provision( + uint256 tokens, + uint256 slashTokens, + uint256 tokensToThaw + ) public useIndexer { + vm.assume(slashTokens <= tokens); + vm.assume(tokensToThaw <= tokens); + vm.assume(tokensToThaw > 0); + _useProvision(subgraphDataServiceAddress, tokens, MAX_PPM, MAX_THAWING_PERIOD); + _thaw(users.indexer, subgraphDataServiceAddress, tokensToThaw); - _thaw(users.indexer, subgraphDataServiceAddress, tokens); + Provision memory beforeProvision = staking.getProvision(users.indexer, subgraphDataServiceAddress); resetPrank(subgraphDataServiceAddress); - _slash(users.indexer, subgraphDataServiceAddress, 1, 0); + _slash(users.indexer, subgraphDataServiceAddress, slashTokens, 0); - resetPrank(users.indexer); - Provision memory provision = staking.getProvision(users.indexer, subgraphDataServiceAddress); - assertEq(provision.tokens, tokens - 1); - // Tokens thawing should be rounded down - assertEq(provision.tokensThawing, tokens - 2); + Provision memory afterProvision = staking.getProvision(users.indexer, subgraphDataServiceAddress); + assertEq(afterProvision.tokens, beforeProvision.tokens - slashTokens); + assertEq( + afterProvision.tokensThawing, + (beforeProvision.tokensThawing * (beforeProvision.tokens - slashTokens)) / beforeProvision.tokens + ); } function testSlash_RoundDown_TokensThawing_Delegation( - uint256 tokens + uint256 tokens, + uint256 delegationTokensToSlash, + uint256 delegationTokensToUndelegate ) public useIndexer useProvision(tokens, MAX_PPM, 0) useDelegationSlashing { + uint256 delegationTokens = 10 ether; + + vm.assume(delegationTokensToSlash <= delegationTokens); + vm.assume(delegationTokensToUndelegate <= delegationTokens); + vm.assume(delegationTokensToUndelegate > 0); + resetPrank(users.delegator); - uint256 delegationTokens = 1 ether + 1; _delegate(users.indexer, subgraphDataServiceAddress, delegationTokens, 0); + _undelegate(users.indexer, subgraphDataServiceAddress, delegationTokensToUndelegate); - DelegationInternal memory delegation = 
_getStorage_Delegation( - users.indexer, - subgraphDataServiceAddress, - users.delegator, - false - ); - _undelegate(users.indexer, subgraphDataServiceAddress, delegation.shares); + DelegationPool memory beforePool = staking.getDelegationPool(users.indexer, subgraphDataServiceAddress); + // Slash resetPrank(subgraphDataServiceAddress); - // Slash 1 token from delegation - _slash(users.indexer, subgraphDataServiceAddress, tokens + 1, 0); + _slash(users.indexer, subgraphDataServiceAddress, tokens + delegationTokensToSlash, 0); - resetPrank(users.delegator); - DelegationPool memory pool = staking.getDelegationPool(users.indexer, subgraphDataServiceAddress); - assertEq(pool.tokens, delegationTokens - 1); - // Tokens thawing should be rounded down - assertEq(pool.tokensThawing, delegationTokens - 2); + DelegationPool memory afterPool = staking.getDelegationPool(users.indexer, subgraphDataServiceAddress); + assertEq(afterPool.tokens, beforePool.tokens - delegationTokensToSlash); + assertEq( + afterPool.tokensThawing, + (beforePool.tokensThawing * (beforePool.tokens - delegationTokensToSlash)) / beforePool.tokens + ); } } diff --git a/packages/subgraph-service/contracts/DisputeManager.sol b/packages/subgraph-service/contracts/DisputeManager.sol index 8d80ce1c1..7d700aafe 100644 --- a/packages/subgraph-service/contracts/DisputeManager.sol +++ b/packages/subgraph-service/contracts/DisputeManager.sol @@ -54,10 +54,10 @@ contract DisputeManager is // -- Constants -- - // Maximum value for fisherman reward cut in PPM + /// @notice Maximum value for fisherman reward cut in PPM uint32 public constant MAX_FISHERMAN_REWARD_CUT = 500000; // 50% - // Minimum value for dispute deposit + /// @notice Minimum value for dispute deposit uint256 public constant MIN_DISPUTE_DEPOSIT = 1e18; // 1 GRT // -- Modifiers -- @@ -104,18 +104,18 @@ contract DisputeManager is /// @inheritdoc IDisputeManager function initialize( address owner, - address arbitrator, - uint64 disputePeriod, - uint256 disputeDeposit, + address arbitrator_, + uint64 disputePeriod_, + uint256 disputeDeposit_, uint32 fishermanRewardCut_, uint32 maxSlashingCut_ ) external override initializer { __Ownable_init(owner); __AttestationManager_init(); - _setArbitrator(arbitrator); - _setDisputePeriod(disputePeriod); - _setDisputeDeposit(disputeDeposit); + _setArbitrator(arbitrator_); + _setDisputePeriod(disputePeriod_); + _setDisputeDeposit(disputeDeposit_); _setFishermanRewardCut(fishermanRewardCut_); _setMaxSlashingCut(maxSlashingCut_); } @@ -196,6 +196,46 @@ contract DisputeManager is return (dId1, dId2); } + /// @inheritdoc IDisputeManager + function createAndAcceptLegacyDispute( + address allocationId, + address fisherman, + uint256 tokensSlash, + uint256 tokensRewards + ) external override onlyArbitrator returns (bytes32) { + // Create a disputeId + bytes32 disputeId = keccak256(abi.encodePacked(allocationId, "legacy")); + + // Get the indexer for the legacy allocation + address indexer = _graphStaking().getAllocation(allocationId).indexer; + require(indexer != address(0), DisputeManagerIndexerNotFound(allocationId)); + + // Store dispute + disputes[disputeId] = Dispute( + indexer, + fisherman, + 0, + 0, + DisputeType.LegacyDispute, + IDisputeManager.DisputeStatus.Accepted, + block.timestamp, + block.timestamp + disputePeriod, + 0 + ); + + // Slash the indexer + ISubgraphService subgraphService_ = _getSubgraphService(); + subgraphService_.slash(indexer, abi.encode(tokensSlash, tokensRewards)); + + // Reward the fisherman + 
_graphToken().pushTokens(fisherman, tokensRewards); + + emit LegacyDisputeCreated(disputeId, indexer, fisherman, allocationId, tokensSlash, tokensRewards); + emit DisputeAccepted(disputeId, indexer, fisherman, tokensRewards); + + return disputeId; + } + /// @inheritdoc IDisputeManager function acceptDispute( bytes32 disputeId, @@ -246,7 +286,7 @@ contract DisputeManager is Dispute storage dispute = disputes[disputeId]; // Check if dispute period has finished - require(dispute.createdAt + disputePeriod < block.timestamp, DisputeManagerDisputePeriodNotFinished()); + require(dispute.cancellableAt <= block.timestamp, DisputeManagerDisputePeriodNotFinished()); _cancelDispute(disputeId, dispute); if (_isDisputeInConflict(dispute)) { @@ -280,8 +320,8 @@ contract DisputeManager is } /// @inheritdoc IDisputeManager - function setSubgraphService(address subgraphService) external override onlyOwner { - _setSubgraphService(subgraphService); + function setSubgraphService(address subgraphService_) external override onlyOwner { + _setSubgraphService(subgraphService_); } /// @inheritdoc IDisputeManager @@ -373,11 +413,12 @@ contract DisputeManager is ) ); - // Only one dispute for a (indexer, subgraphDeploymentId) at a time + // Only one dispute at a time require(!isDisputeCreated(disputeId), DisputeManagerDisputeAlreadyCreated(disputeId)); // Store dispute uint256 stakeSnapshot = _getStakeSnapshot(indexer, provision.tokens); + uint256 cancellableAt = block.timestamp + disputePeriod; disputes[disputeId] = Dispute( indexer, _fisherman, @@ -386,6 +427,7 @@ contract DisputeManager is DisputeType.QueryDispute, IDisputeManager.DisputeStatus.Pending, block.timestamp, + cancellableAt, stakeSnapshot ); @@ -396,6 +438,7 @@ contract DisputeManager is _deposit, _attestation.subgraphDeploymentId, _attestationData, + cancellableAt, stakeSnapshot ); @@ -434,6 +477,7 @@ contract DisputeManager is // Store dispute uint256 stakeSnapshot = _getStakeSnapshot(indexer, provision.tokens); + uint256 cancellableAt = block.timestamp + disputePeriod; disputes[disputeId] = Dispute( alloc.indexer, _fisherman, @@ -442,10 +486,20 @@ contract DisputeManager is DisputeType.IndexingDispute, IDisputeManager.DisputeStatus.Pending, block.timestamp, + cancellableAt, stakeSnapshot ); - emit IndexingDisputeCreated(disputeId, alloc.indexer, _fisherman, _deposit, _allocationId, _poi, stakeSnapshot); + emit IndexingDisputeCreated( + disputeId, + alloc.indexer, + _fisherman, + _deposit, + _allocationId, + _poi, + stakeSnapshot, + cancellableAt + ); return disputeId; } @@ -525,10 +579,15 @@ contract DisputeManager is DisputeManagerInvalidTokensSlash(_tokensSlash, maxTokensSlash) ); - // Rewards amount can only be extracted from service provider tokens so - // we grab the minimum between the slash amount and indexer's tokens - uint256 maxRewardableTokens = Math.min(_tokensSlash, provision.tokens); - uint256 tokensRewards = uint256(fishermanRewardCut).mulPPM(maxRewardableTokens); + // Rewards calculation: + // - Rewards can only be extracted from service provider tokens so we grab the minimum between the slash + // amount and indexer's tokens + // - The applied cut is the minimum between the provision's maxVerifierCut and the current fishermanRewardCut. This + // protects the indexer from sudden changes to the fishermanRewardCut while ensuring the slashing does not revert due + // to excessive rewards being requested. 
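// Worked example of the reward cut described above, using hypothetical PPM values (1_000_000 PPM = 100%):
// if _tokensSlash = 1_000e18, provision.tokens = 800e18, provision.maxVerifierCut = 100_000 (10%) and
// fishermanRewardCut = 500_000 (50%), then
//   maxRewardableTokens = min(1_000e18, 800e18)   = 800e18
//   effectiveCut        = min(100_000, 500_000)   = 100_000
//   tokensRewards       = mulPPM(800e18, 100_000) = 80e18 (80 GRT)
// i.e. the fisherman reward is bounded by the provision's maxVerifierCut rather than by the global cut alone.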
+ uint256 maxRewardableTokens = MathUtils.min(_tokensSlash, provision.tokens); + uint256 effectiveCut = MathUtils.min(provision.maxVerifierCut, fishermanRewardCut); + uint256 tokensRewards = effectiveCut.mulPPM(maxRewardableTokens); subgraphService_.slash(_indexer, abi.encode(_tokensSlash, tokensRewards)); return tokensRewards; @@ -628,15 +687,16 @@ contract DisputeManager is * - Thawing stake is not excluded from the snapshot. * - Delegators stake is capped at the delegation ratio to prevent delegators from inflating the snapshot * to increase the indexer slash amount. + * + * Note that the snapshot can be inflated by delegators front-running the dispute creation with a delegation + * to the indexer. Given the snapshot is a cap, the dispute outcome is uncertain and considering the cost of capital + * and slashing risk, this is not a concern. * @param _indexer Indexer address * @param _indexerStake Indexer's stake * @return Total stake snapshot */ function _getStakeSnapshot(address _indexer, uint256 _indexerStake) private view returns (uint256) { - ISubgraphService subgraphService_ = _getSubgraphService(); - uint256 delegatorsStake = _graphStaking().getDelegationPool(_indexer, address(subgraphService_)).tokens; - uint256 delegatorsStakeMax = _indexerStake * uint256(subgraphService_.getDelegationRatio()); - uint256 stakeSnapshot = _indexerStake + MathUtils.min(delegatorsStake, delegatorsStakeMax); - return stakeSnapshot; + uint256 delegatorsStake = _graphStaking().getDelegationPool(_indexer, address(_getSubgraphService())).tokens; + return _indexerStake + delegatorsStake; } } diff --git a/packages/subgraph-service/contracts/SubgraphService.sol b/packages/subgraph-service/contracts/SubgraphService.sol index edf918771..f6cee7d7e 100644 --- a/packages/subgraph-service/contracts/SubgraphService.sol +++ b/packages/subgraph-service/contracts/SubgraphService.sol @@ -77,7 +77,7 @@ contract SubgraphService is address owner, uint256 minimumProvisionTokens, uint32 maximumDelegationRatio, - uint256 stakeToFeesRatio + uint256 stakeToFeesRatio_ ) external initializer { __Ownable_init(owner); __Multicall_init(); @@ -87,7 +87,7 @@ contract SubgraphService is _setProvisionTokensRange(minimumProvisionTokens, type(uint256).max); _setDelegationRatio(maximumDelegationRatio); - _setStakeToFeesRatio(stakeToFeesRatio); + _setStakeToFeesRatio(stakeToFeesRatio_); } /** @@ -113,7 +113,7 @@ contract SubgraphService is address indexer, bytes calldata data ) external override onlyAuthorizedForProvision(indexer) onlyValidProvision(indexer) whenNotPaused { - (string memory url, string memory geohash, address rewardsDestination) = abi.decode( + (string memory url, string memory geohash, address rewardsDestination_) = abi.decode( data, (string, string, address) ); @@ -124,8 +124,8 @@ contract SubgraphService is // Register the indexer indexers[indexer] = Indexer({ registeredAt: block.timestamp, url: url, geoHash: geohash }); - if (rewardsDestination != address(0)) { - _setRewardsDestination(indexer, rewardsDestination); + if (rewardsDestination_ != address(0)) { + _setRewardsDestination(indexer, rewardsDestination_); } emit ServiceProviderRegistered(indexer, data); @@ -148,8 +148,7 @@ contract SubgraphService is function acceptProvisionPendingParameters( address indexer, bytes calldata - ) external override onlyAuthorizedForProvision(indexer) onlyRegisteredIndexer(indexer) whenNotPaused { - _checkProvisionTokens(indexer); + ) external override onlyAuthorizedForProvision(indexer) whenNotPaused { 
_acceptProvisionParameters(indexer); emit ProvisionPendingParametersAccepted(indexer); } @@ -227,7 +226,7 @@ contract SubgraphService is _allocations.get(allocationId).indexer == indexer, SubgraphServiceAllocationNotAuthorized(indexer, allocationId) ); - _closeAllocation(allocationId); + _closeAllocation(allocationId, false); emit ServiceStopped(indexer, data); } @@ -268,12 +267,15 @@ contract SubgraphService is uint256 paymentCollected = 0; if (paymentType == IGraphPayments.PaymentTypes.QueryFee) { - IGraphTallyCollector.SignedRAV memory signedRav = abi.decode(data, (IGraphTallyCollector.SignedRAV)); + (IGraphTallyCollector.SignedRAV memory signedRav, uint256 tokensToCollect) = abi.decode( + data, + (IGraphTallyCollector.SignedRAV, uint256) + ); require( signedRav.rav.serviceProvider == indexer, SubgraphServiceIndexerMismatch(signedRav.rav.serviceProvider, indexer) ); - paymentCollected = _collectQueryFees(signedRav); + paymentCollected = _collectQueryFees(signedRav, tokensToCollect); } else if (paymentType == IGraphPayments.PaymentTypes.IndexingRewards) { (address allocationId, bytes32 poi) = abi.decode(data, (address, bytes32)); require( @@ -306,7 +308,7 @@ contract SubgraphService is Allocation.State memory allocation = _allocations.get(allocationId); require(allocation.isStale(maxPOIStaleness), SubgraphServiceCannotForceCloseAllocation(allocationId)); require(!allocation.isAltruistic(), SubgraphServiceAllocationIsAltruistic(allocationId)); - _closeAllocation(allocationId); + _closeAllocation(allocationId, true); } /// @inheritdoc ISubgraphService @@ -343,8 +345,8 @@ contract SubgraphService is } /// @inheritdoc ISubgraphService - function setRewardsDestination(address rewardsDestination) external override { - _setRewardsDestination(msg.sender, rewardsDestination); + function setRewardsDestination(address rewardsDestination_) external override { + _setRewardsDestination(msg.sender, rewardsDestination_); } /// @inheritdoc ISubgraphService @@ -363,8 +365,8 @@ contract SubgraphService is } /// @inheritdoc ISubgraphService - function setMaxPOIStaleness(uint256 maxPOIStaleness) external override onlyOwner { - _setMaxPOIStaleness(maxPOIStaleness); + function setMaxPOIStaleness(uint256 maxPOIStaleness_) external override onlyOwner { + _setMaxPOIStaleness(maxPOIStaleness_); } /// @inheritdoc ISubgraphService @@ -382,9 +384,10 @@ contract SubgraphService is /// @inheritdoc IRewardsIssuer function getAllocationData( address allocationId - ) external view override returns (address, bytes32, uint256, uint256, uint256) { + ) external view override returns (bool, address, bytes32, uint256, uint256, uint256) { Allocation.State memory allo = _allocations[allocationId]; return ( + allo.isOpen(), allo.indexer, allo.subgraphDeploymentId, allo.tokens, @@ -431,12 +434,14 @@ contract SubgraphService is // -- Data service parameter getters -- /** * @notice Getter for the accepted thawing period range for provisions + * The accepted range is just the dispute period defined by {DisputeManager-getDisputePeriod} * @dev This override ensures {ProvisionManager} uses the thawing period from the {DisputeManager} - * @return The minimum thawing period which is defined by {DisputeManager-getDisputePeriod} - * @return The maximum is unbounded + * @return The minimum thawing period - the dispute period + * @return The maximum thawing period - the dispute period */ function _getThawingPeriodRange() internal view override returns (uint64, uint64) { - return (_disputeManager().getDisputePeriod(), 
DEFAULT_MAX_THAWING_PERIOD); + uint64 disputePeriod = _disputeManager().getDisputePeriod(); + return (disputePeriod, disputePeriod); } /** @@ -472,9 +477,14 @@ contract SubgraphService is * Emits a {QueryFeesCollected} event. * * @param _signedRav Signed RAV + * @param tokensToCollect The amount of tokens to collect. Allows partially collecting a RAV. If 0, the entire RAV will + * be collected. * @return The amount of fees collected */ - function _collectQueryFees(IGraphTallyCollector.SignedRAV memory _signedRav) private returns (uint256) { + function _collectQueryFees( + IGraphTallyCollector.SignedRAV memory _signedRav, + uint256 tokensToCollect + ) private returns (uint256) { address indexer = _signedRav.rav.serviceProvider; // Check that collectionId (256 bits) is a valid address (160 bits) @@ -499,7 +509,8 @@ contract SubgraphService is uint256 curationCut = _curation().isCurated(subgraphDeploymentId) ? curationFeesCut : 0; uint256 tokensCollected = _graphTallyCollector().collect( IGraphPayments.PaymentTypes.QueryFee, - abi.encode(_signedRav, curationCut) + abi.encode(_signedRav, curationCut), + tokensToCollect ); uint256 balanceAfter = _graphToken().balanceOf(address(this)); @@ -522,7 +533,14 @@ contract SubgraphService is } } - emit QueryFeesCollected(indexer, _signedRav.rav.payer, tokensCollected, tokensCurators); + emit QueryFeesCollected( + indexer, + _signedRav.rav.payer, + allocationId, + subgraphDeploymentId, + tokensCollected, + tokensCurators + ); return tokensCollected; } diff --git a/packages/subgraph-service/contracts/interfaces/IDisputeManager.sol b/packages/subgraph-service/contracts/interfaces/IDisputeManager.sol index c8754ca44..611009bef 100644 --- a/packages/subgraph-service/contracts/interfaces/IDisputeManager.sol +++ b/packages/subgraph-service/contracts/interfaces/IDisputeManager.sol @@ -15,7 +15,8 @@ interface IDisputeManager { enum DisputeType { Null, IndexingDispute, - QueryDispute + QueryDispute, + LegacyDispute } /// @notice Status of a dispute @@ -37,6 +38,7 @@ interface IDisputeManager { * @param disputeType The type of dispute * @param status The status of the dispute * @param createdAt The timestamp when the dispute was created + * @param cancellableAt The timestamp when the dispute can be cancelled * @param stakeSnapshot The stake snapshot of the indexer at the time of the dispute (includes delegation up to the delegation ratio) */ struct Dispute { @@ -47,6 +49,7 @@ interface IDisputeManager { DisputeType disputeType; DisputeStatus status; uint256 createdAt; + uint256 cancellableAt; uint256 stakeSnapshot; } @@ -96,6 +99,7 @@ interface IDisputeManager { * @param tokens The amount of tokens deposited by the fisherman * @param subgraphDeploymentId The subgraph deployment id * @param attestation The attestation + * @param cancellableAt The timestamp when the dispute can be cancelled * @param stakeSnapshot The stake snapshot of the indexer at the time of the dispute */ event QueryDisputeCreated( @@ -105,7 +109,8 @@ uint256 tokens, bytes32 subgraphDeploymentId, bytes attestation, - uint256 stakeSnapshot + uint256 cancellableAt, + uint256 stakeSnapshot ); /** @@ -119,6 +124,7 @@ * @param tokens The amount of tokens deposited by the fisherman * @param allocationId The allocation id * @param poi The POI * @param stakeSnapshot The stake snapshot of the indexer at the time of the dispute + * @param cancellableAt The timestamp when the dispute can be cancelled */ event IndexingDisputeCreated( bytes32 indexed disputeId, @@ -127,7 +133,27 @@
uint256 tokens, address allocationId, bytes32 poi, - uint256 stakeSnapshot + uint256 stakeSnapshot, + uint256 cancellableAt + ); + + /** + * @dev Emitted when a legacy dispute is created for `allocationId` and `fisherman`. + * The event emits the amount of `tokensSlash` to slash and `tokensRewards` to reward the fisherman. + * @param disputeId The dispute id + * @param indexer The indexer address + * @param fisherman The fisherman address to be credited with the rewards + * @param allocationId The allocation id + * @param tokensSlash The amount of tokens to slash + * @param tokensRewards The amount of tokens to reward the fisherman + */ + event LegacyDisputeCreated( + bytes32 indexed disputeId, + address indexed indexer, + address indexed fisherman, + address allocationId, + uint256 tokensSlash, + uint256 tokensRewards ); /** @@ -436,6 +462,39 @@ interface IDisputeManager { */ function createIndexingDispute(address allocationId, bytes32 poi) external returns (bytes32); + /** + * @notice Creates and auto-accepts a legacy dispute. + * These disputes can be created to settle outstanding slashing amounts with an indexer that has been + * "legacy slashed" during or shortly after the transition period. See {HorizonStakingExtension.legacySlash} + * for more details. + * + * Note that this type of dispute: + * - can only be created by the arbitrator + * - does not require a bond + * - is automatically accepted when created + * + * Additionally, note that this type of dispute allows the arbitrator to directly set the slash and rewards + * amounts, bypassing the usual mechanisms that impose restrictions on those. This is done to give arbitrators + * maximum flexibility to ensure outstanding slashing amounts are settled fairly. This function needs to be removed + after the transition period.
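// A minimal usage sketch under stated assumptions: `disputeManager` is the deployed DisputeManager, the
// caller is the configured arbitrator, and `legacyAllocationId` / `fisherman` are hypothetical addresses
// for an allocation that was legacy slashed:
//
//   bytes32 disputeId = disputeManager.createAndAcceptLegacyDispute(
//       legacyAllocationId,
//       fisherman,
//       1_000e18, // tokensSlash: outstanding amount to slash from the indexer's provision
//       100e18    // tokensRewards: amount credited to the fisherman
//   );
//
// Per the DisputeManager changes above, the dispute is stored already Accepted, the SubgraphService is
// called to slash the indexer for tokensSlash, and tokensRewards are pushed to the fisherman in the same
// transaction.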
+ * + * Requirements: + * - Indexer must have been legacy slashed during or shortly after the transition period + * - Indexer must have provisioned funds to the Subgraph Service + * + * @param allocationId The allocation to dispute + * @param fisherman The fisherman address to be credited with the rewards + * @param tokensSlash The amount of tokens to slash + * @param tokensRewards The amount of tokens to reward the fisherman + * @return The dispute id + */ + function createAndAcceptLegacyDispute( + address allocationId, + address fisherman, + uint256 tokensSlash, + uint256 tokensRewards + ) external returns (bytes32); + // -- Arbitrator -- /** diff --git a/packages/subgraph-service/contracts/interfaces/ISubgraphService.sol b/packages/subgraph-service/contracts/interfaces/ISubgraphService.sol index 6ae78f5b7..af2a2a953 100644 --- a/packages/subgraph-service/contracts/interfaces/ISubgraphService.sol +++ b/packages/subgraph-service/contracts/interfaces/ISubgraphService.sol @@ -34,12 +34,16 @@ interface ISubgraphService is IDataServiceFees { * @notice Emitted when a subgraph service collects query fees from Graph Payments * @param serviceProvider The address of the service provider * @param payer The address paying for the query fees + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment * @param tokensCollected The amount of tokens collected * @param tokensCurators The amount of tokens curators receive */ event QueryFeesCollected( address indexed serviceProvider, address indexed payer, + address indexed allocationId, + bytes32 subgraphDeploymentId, uint256 tokensCollected, uint256 tokensCurators ); diff --git a/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol b/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol index b2d751c5f..3d7f96213 100644 --- a/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol +++ b/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol @@ -54,8 +54,7 @@ library LegacyAllocation { ) internal { require(!self[allocationId].exists(), LegacyAllocationAlreadyExists(allocationId)); - State memory allocation = State({ indexer: indexer, subgraphDeploymentId: subgraphDeploymentId }); - self[allocationId] = allocation; + self[allocationId] = State({ indexer: indexer, subgraphDeploymentId: subgraphDeploymentId }); } /** diff --git a/packages/subgraph-service/contracts/utilities/AllocationManager.sol b/packages/subgraph-service/contracts/utilities/AllocationManager.sol index 5e79f04b4..cd5ad6451 100644 --- a/packages/subgraph-service/contracts/utilities/AllocationManager.sol +++ b/packages/subgraph-service/contracts/utilities/AllocationManager.sol @@ -32,8 +32,8 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca using PPMMath for uint256; using TokenUtils for IGraphToken; - ///@dev EIP712 typehash for allocation proof - bytes32 private constant EIP712_ALLOCATION_PROOF_TYPEHASH = + ///@dev EIP712 typehash for allocation id proof + bytes32 private constant EIP712_ALLOCATION_ID_PROOF_TYPEHASH = keccak256("AllocationIdProof(address indexer,address allocationId)"); /** @@ -96,12 +96,14 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca * @param allocationId The id of the allocation * @param subgraphDeploymentId The id of the subgraph deployment * @param tokens The amount of tokens allocated + * @param forceClosed Whether the allocation was force closed */ event AllocationClosed( address indexed 
indexer, address indexed allocationId, bytes32 indexed subgraphDeploymentId, - uint256 tokens + uint256 tokens, + bool forceClosed ); /** @@ -198,7 +200,6 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca * @param _tokens The amount of tokens to allocate * @param _allocationProof Signed proof of allocation id address ownership * @param _delegationRatio The delegation ratio to consider when locking tokens - * @return The allocation details */ function _allocate( address _indexer, @@ -207,7 +208,7 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca uint256 _tokens, bytes memory _allocationProof, uint32 _delegationRatio - ) internal returns (Allocation.State memory) { + ) internal { require(_allocationId != address(0), AllocationManagerInvalidZeroAllocationId()); _verifyAllocationProof(_indexer, _allocationId, _allocationProof); @@ -236,7 +237,6 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca allocation.tokens; emit AllocationCreated(_indexer, _allocationId, _subgraphDeploymentId, allocation.tokens, currentEpoch); - return allocation; } /** @@ -335,9 +335,9 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca currentEpoch ); - // Check if the indexer is over-allocated and close the allocation if necessary + // Check if the indexer is over-allocated and force close the allocation if necessary if (_isOverAllocated(allocation.indexer, _delegationRatio)) { - _closeAllocation(_allocationId); + _closeAllocation(_allocationId, true); } return tokensRewards; @@ -359,13 +359,8 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca * @param _allocationId The id of the allocation to be resized * @param _tokens The new amount of tokens to allocate * @param _delegationRatio The delegation ratio to consider when locking tokens - * @return The allocation details */ - function _resizeAllocation( - address _allocationId, - uint256 _tokens, - uint32 _delegationRatio - ) internal returns (Allocation.State memory) { + function _resizeAllocation(address _allocationId, uint256 _tokens, uint32 _delegationRatio) internal { Allocation.State memory allocation = _allocations.get(_allocationId); require(allocation.isOpen(), AllocationManagerAllocationClosed(_allocationId)); require(_tokens != allocation.tokens, AllocationManagerAllocationSameSize(_allocationId, _tokens)); @@ -402,7 +397,6 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca } emit AllocationResized(allocation.indexer, _allocationId, allocation.subgraphDeploymentId, _tokens, oldTokens); - return _allocations[_allocationId]; } /** @@ -415,8 +409,9 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca * Emits a {AllocationClosed} event * * @param _allocationId The id of the allocation to be closed + * @param _forceClosed Whether the allocation was force closed */ - function _closeAllocation(address _allocationId) internal { + function _closeAllocation(address _allocationId, bool _forceClosed) internal { Allocation.State memory allocation = _allocations.get(_allocationId); // Take rewards snapshot to prevent other allos from counting tokens from this allo @@ -433,7 +428,13 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca _subgraphAllocatedTokens[allocation.subgraphDeploymentId] - allocation.tokens; - emit AllocationClosed(allocation.indexer, _allocationId, allocation.subgraphDeploymentId, 
allocation.tokens); + emit AllocationClosed( + allocation.indexer, + _allocationId, + allocation.subgraphDeploymentId, + allocation.tokens, + _forceClosed + ); } /** @@ -457,24 +458,6 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca emit MaxPOIStalenessSet(_maxPOIStaleness); } - /** - * @notice Gets the details of an allocation - * @param _allocationId The id of the allocation - * @return The allocation details - */ - function _getAllocation(address _allocationId) internal view returns (Allocation.State memory) { - return _allocations.get(_allocationId); - } - - /** - * @notice Gets the details of a legacy allocation - * @param _allocationId The id of the legacy allocation - * @return The legacy allocation details - */ - function _getLegacyAllocation(address _allocationId) internal view returns (LegacyAllocation.State memory) { - return _legacyAllocations.get(_allocationId); - } - /** * @notice Encodes the allocation proof for EIP712 signing * @param _indexer The address of the indexer @@ -482,7 +465,7 @@ abstract contract AllocationManager is EIP712Upgradeable, GraphDirectory, Alloca * @return The encoded allocation proof */ function _encodeAllocationProof(address _indexer, address _allocationId) internal view returns (bytes32) { - return _hashTypedDataV4(keccak256(abi.encode(EIP712_ALLOCATION_PROOF_TYPEHASH, _indexer, _allocationId))); + return _hashTypedDataV4(keccak256(abi.encode(EIP712_ALLOCATION_ID_PROOF_TYPEHASH, _indexer, _allocationId))); } /** diff --git a/packages/subgraph-service/test/disputeManager/DisputeManager.t.sol b/packages/subgraph-service/test/disputeManager/DisputeManager.t.sol index cfd5882b9..4ea784e46 100644 --- a/packages/subgraph-service/test/disputeManager/DisputeManager.t.sol +++ b/packages/subgraph-service/test/disputeManager/DisputeManager.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.27; import "forge-std/Test.sol"; +import { MathUtils } from "@graphprotocol/horizon/contracts/libraries/MathUtils.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; import { IDisputeManager } from "../../contracts/interfaces/IDisputeManager.sol"; import { Attestation } from "../../contracts/libraries/Attestation.sol"; @@ -76,6 +77,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { uint256 beforeFishermanBalance = token.balanceOf(fisherman); Allocation.State memory alloc = subgraphService.getAllocation(_allocationId); uint256 stakeSnapshot = disputeManager.getStakeSnapshot(alloc.indexer); + uint256 cancellableAt = block.timestamp + disputeManager.disputePeriod(); // Approve the dispute deposit token.approve(address(disputeManager), disputeDeposit); @@ -88,7 +90,8 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { disputeDeposit, _allocationId, _poi, - stakeSnapshot + stakeSnapshot, + cancellableAt ); // Create the indexing dispute @@ -144,6 +147,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { uint256 disputeDeposit = disputeManager.disputeDeposit(); uint256 beforeFishermanBalance = token.balanceOf(fisherman); uint256 stakeSnapshot = disputeManager.getStakeSnapshot(indexer); + uint256 cancellableAt = block.timestamp + disputeManager.disputePeriod(); // Approve the dispute deposit token.approve(address(disputeManager), disputeDeposit); @@ -156,6 +160,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { disputeDeposit, attestation.subgraphDeploymentId, _attestationData, + cancellableAt, stakeSnapshot ); @@ -195,6 +200,81 @@ contract DisputeManagerTest is 
SubgraphServiceSharedTest { return _disputeID; } + struct Balances { + uint256 indexer; + uint256 fisherman; + uint256 arbitrator; + uint256 disputeManager; + uint256 staking; + } + + function _createAndAcceptLegacyDispute( + address _allocationId, + address _fisherman, + uint256 _tokensSlash, + uint256 _tokensRewards + ) internal returns (bytes32) { + (, address arbitrator, ) = vm.readCallers(); + address indexer = staking.getAllocation(_allocationId).indexer; + + Balances memory beforeBalances = Balances({ + indexer: token.balanceOf(indexer), + fisherman: token.balanceOf(_fisherman), + arbitrator: token.balanceOf(arbitrator), + disputeManager: token.balanceOf(address(disputeManager)), + staking: token.balanceOf(address(staking)) + }); + + vm.expectEmit(address(disputeManager)); + emit IDisputeManager.LegacyDisputeCreated( + keccak256(abi.encodePacked(_allocationId, "legacy")), + indexer, + _fisherman, + _allocationId, + _tokensSlash, + _tokensRewards + ); + vm.expectEmit(address(disputeManager)); + emit IDisputeManager.DisputeAccepted( + keccak256(abi.encodePacked(_allocationId, "legacy")), + indexer, + _fisherman, + _tokensRewards + ); + bytes32 _disputeId = disputeManager.createAndAcceptLegacyDispute( + _allocationId, + _fisherman, + _tokensSlash, + _tokensRewards + ); + + Balances memory afterBalances = Balances({ + indexer: token.balanceOf(indexer), + fisherman: token.balanceOf(_fisherman), + arbitrator: token.balanceOf(arbitrator), + disputeManager: token.balanceOf(address(disputeManager)), + staking: token.balanceOf(address(staking)) + }); + + assertEq(afterBalances.indexer, beforeBalances.indexer); + assertEq(afterBalances.fisherman, beforeBalances.fisherman + _tokensRewards); + assertEq(afterBalances.arbitrator, beforeBalances.arbitrator); + assertEq(afterBalances.disputeManager, beforeBalances.disputeManager); + assertEq(afterBalances.staking, beforeBalances.staking - _tokensSlash); + + IDisputeManager.Dispute memory dispute = _getDispute(_disputeId); + assertEq(dispute.indexer, indexer); + assertEq(dispute.fisherman, _fisherman); + assertEq(dispute.deposit, 0); + assertEq(dispute.relatedDisputeId, bytes32(0)); + assertEq(uint8(dispute.disputeType), uint8(IDisputeManager.DisputeType.LegacyDispute)); + assertEq(uint8(dispute.status), uint8(IDisputeManager.DisputeStatus.Accepted)); + assertEq(dispute.createdAt, block.timestamp); + assertEq(dispute.stakeSnapshot, 0); + + return _disputeId; + } + struct BeforeValues_CreateQueryDisputeConflict { Attestation.State attestation1; Attestation.State attestation2; @@ -242,6 +322,8 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { ) ); + uint256 cancellableAt = block.timestamp + disputeManager.disputePeriod(); + // createQueryDisputeConflict vm.expectEmit(address(disputeManager)); emit IDisputeManager.QueryDisputeCreated( @@ -251,6 +333,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { disputeDeposit / 2, beforeValues.attestation1.subgraphDeploymentId, attestationData1, + cancellableAt, beforeValues.stakeSnapshot1 ); vm.expectEmit(address(disputeManager)); @@ -261,6 +344,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { disputeDeposit / 2, beforeValues.attestation2.subgraphDeploymentId, attestationData2, + cancellableAt, beforeValues.stakeSnapshot2 ); @@ -329,8 +413,17 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { uint256 fishermanPreviousBalance = token.balanceOf(fisherman); uint256 indexerTokensAvailable = staking.getProviderTokensAvailable(dispute.indexer, address(subgraphService)); 
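// Note on the `cancellableAt` expectations used in these helpers: following the DisputeManager changes
// above, cancellableAt is fixed at creation time (block.timestamp + disputePeriod) and cancelDispute now
// checks `dispute.cancellableAt <= block.timestamp`, so later changes to disputePeriod do not move the
// cancellation window of disputes that already exist. A hypothetical fisherman flow, assuming `disputeId`
// was just created and disputePeriod is unchanged:
//
//   skip(disputeManager.disputePeriod() + 1); // move past the stored cancellableAt
//   disputeManager.cancelDispute(disputeId);  // passes the cancellableAt check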
uint256 disputeDeposit = dispute.deposit; - uint256 fishermanRewardPercentage = disputeManager.fishermanRewardCut(); - uint256 fishermanReward = _tokensSlash.mulPPM(fishermanRewardPercentage); + uint256 fishermanReward; + { + uint32 provisionMaxVerifierCut = staking + .getProvision(dispute.indexer, address(subgraphService)) + .maxVerifierCut; + uint256 fishermanRewardPercentage = MathUtils.min( + disputeManager.fishermanRewardCut(), + provisionMaxVerifierCut + ); + fishermanReward = _tokensSlash.mulPPM(fishermanRewardPercentage); + } vm.expectEmit(address(disputeManager)); emit IDisputeManager.DisputeAccepted( @@ -603,12 +696,8 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { if (isDisputeInConflict) relatedDispute = _getDispute(dispute.relatedDisputeId); address fisherman = dispute.fisherman; uint256 fishermanPreviousBalance = token.balanceOf(fisherman); - uint256 disputePeriod = disputeManager.disputePeriod(); uint256 indexerTokensAvailable = staking.getProviderTokensAvailable(dispute.indexer, address(subgraphService)); - // skip to end of dispute period - skip(disputePeriod + 1); - vm.expectEmit(address(disputeManager)); emit IDisputeManager.DisputeCancelled(_disputeId, dispute.indexer, dispute.fisherman, dispute.deposit); @@ -715,6 +804,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { IDisputeManager.DisputeType disputeType, IDisputeManager.DisputeStatus status, uint256 createdAt, + uint256 cancellableAt, uint256 stakeSnapshot ) = disputeManager.disputes(_disputeId); return @@ -726,6 +816,7 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { disputeType: disputeType, status: status, createdAt: createdAt, + cancellableAt: cancellableAt, stakeSnapshot: stakeSnapshot }); } diff --git a/packages/subgraph-service/test/disputeManager/disputes/indexing/cancel.t.sol b/packages/subgraph-service/test/disputeManager/disputes/indexing/cancel.t.sol index e2aaaa541..977d1abf2 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/indexing/cancel.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/indexing/cancel.t.sol @@ -15,6 +15,10 @@ contract DisputeManagerIndexingCancelDisputeTest is DisputeManagerTest { resetPrank(users.fisherman); bytes32 disputeID = _createIndexingDispute(allocationID, bytes32("POI1")); + // skip to end of dispute period + uint256 disputePeriod = disputeManager.disputePeriod(); + skip(disputePeriod + 1); + _cancelDispute(disputeID); } @@ -38,4 +42,39 @@ contract DisputeManagerIndexingCancelDisputeTest is DisputeManagerTest { vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); disputeManager.cancelDispute(disputeID); } + + function test_Indexing_Cancel_After_DisputePeriodIncreased(uint256 tokens) public useIndexer useAllocation(tokens) { + resetPrank(users.fisherman); + bytes32 disputeID = _createIndexingDispute(allocationID, bytes32("POI1")); + + // change the dispute period to a higher value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod * 2)); + + // skip to end of old dispute period + skip(oldDisputePeriod + 1); + + // should be able to cancel + resetPrank(users.fisherman); + _cancelDispute(disputeID); + } + + function test_Indexing_Cancel_After_DisputePeriodDecreased(uint256 tokens) public useIndexer useAllocation(tokens) { + resetPrank(users.fisherman); + bytes32 disputeID = _createIndexingDispute(allocationID, bytes32("POI1")); + + // change the 
dispute period to a lower value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod / 2)); + + // skip to end of new dispute period + skip(oldDisputePeriod / 2 + 1); + + // should not be able to cancel + resetPrank(users.fisherman); + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); + disputeManager.cancelDispute(disputeID); + } } diff --git a/packages/subgraph-service/test/disputeManager/disputes/indexing/create.t.sol b/packages/subgraph-service/test/disputeManager/disputes/indexing/create.t.sol index 3b1df5011..b2ca9b8de 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/indexing/create.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/indexing/create.t.sol @@ -16,6 +16,30 @@ contract DisputeManagerIndexingCreateDisputeTest is DisputeManagerTest { _createIndexingDispute(allocationID, bytes32("POI1")); } + function test_Indexing_Create_Dispute_WithDelegation(uint256 tokens, uint256 delegationTokens) public useIndexer { + vm.assume(tokens >= minimumProvisionTokens); + vm.assume(tokens < 100_000_000 ether); // set a low cap to test overdelegation + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); + _register(users.indexer, abi.encode("url", "geoHash", address(0))); + bytes memory data = _createSubgraphAllocationData( + users.indexer, + subgraphDeployment, + allocationIDPrivateKey, + tokens + ); + _startService(users.indexer, data); + + uint256 delegationRatio = subgraphService.getDelegationRatio(); + delegationTokens = bound(delegationTokens, 1e18, tokens * delegationRatio * 2); // make sure we have overdelegation + + resetPrank(users.delegator); + token.approve(address(staking), delegationTokens); + staking.delegate(users.indexer, address(subgraphService), delegationTokens, 0); + + resetPrank(users.fisherman); + _createIndexingDispute(allocationID, bytes32("POI1")); + } + function test_Indexing_Create_Dispute_RevertWhen_SubgraphServiceNotSet( uint256 tokens ) public useIndexer useAllocation(tokens) { @@ -42,7 +66,7 @@ contract DisputeManagerIndexingCreateDisputeTest is DisputeManagerTest { resetPrank(indexer); mint(indexer, tokens); - _createProvision(indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(indexer, abi.encode("url", "geoHash", address(0))); uint256 allocationIDPrivateKey = uint256(keccak256(abi.encodePacked(i))); bytes memory data = _createSubgraphAllocationData( diff --git a/packages/subgraph-service/test/disputeManager/disputes/legacy.t.sol b/packages/subgraph-service/test/disputeManager/disputes/legacy.t.sol new file mode 100644 index 000000000..f3055869a --- /dev/null +++ b/packages/subgraph-service/test/disputeManager/disputes/legacy.t.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.27; + +import "forge-std/Test.sol"; + +import { Attestation } from "../../../contracts/libraries/Attestation.sol"; +import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; +import { IDisputeManager } from "../../../contracts/interfaces/IDisputeManager.sol"; +import { DisputeManagerTest } from "../DisputeManager.t.sol"; + +contract DisputeManagerLegacyDisputeTest is DisputeManagerTest { + using PPMMath for uint256; + + bytes32 private requestCID = keccak256(abi.encodePacked("Request CID")); + bytes32 private responseCID = 
keccak256(abi.encodePacked("Response CID")); + bytes32 private subgraphDeploymentId = keccak256(abi.encodePacked("Subgraph Deployment ID")); + + /* + * TESTS + */ + + function test_LegacyDispute( + uint256 tokensStaked, + uint256 tokensProvisioned, + uint256 tokensSlash, + uint256 tokensRewards + ) public { + vm.assume(tokensStaked <= MAX_TOKENS); + vm.assume(tokensStaked >= minimumProvisionTokens); + tokensProvisioned = bound(tokensProvisioned, minimumProvisionTokens, tokensStaked); + tokensSlash = bound(tokensSlash, 2, tokensProvisioned); + tokensRewards = bound(tokensRewards, 1, tokensSlash.mulPPM(fishermanRewardPercentage)); + + // setup indexer state + resetPrank(users.indexer); + _stake(tokensStaked); + _setStorage_allocation_hardcoded(users.indexer, allocationID, tokensStaked - tokensProvisioned); + _provision(users.indexer, tokensProvisioned, fishermanRewardPercentage, disputePeriod); + + resetPrank(users.arbitrator); + _createAndAcceptLegacyDispute(allocationID, users.fisherman, tokensSlash, tokensRewards); + } + + function test_LegacyDispute_RevertIf_NotArbitrator() public useIndexer { + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerNotArbitrator.selector)); + disputeManager.createAndAcceptLegacyDispute(allocationID, users.fisherman, 0, 0); + } + + function test_LegacyDispute_RevertIf_AllocationNotFound() public useIndexer { + resetPrank(users.arbitrator); + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerIndexerNotFound.selector, address(0))); + disputeManager.createAndAcceptLegacyDispute(address(0), users.fisherman, 0, 0); + } +} diff --git a/packages/subgraph-service/test/disputeManager/disputes/query/accept.t.sol b/packages/subgraph-service/test/disputeManager/disputes/query/accept.t.sol index 9cfbaae10..86405ad43 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/query/accept.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/query/accept.t.sol @@ -118,4 +118,108 @@ contract DisputeManagerQueryAcceptDisputeTest is DisputeManagerTest { vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputeNotInConflict.selector, disputeID)); disputeManager.acceptDisputeConflict(disputeID, tokensSlash, true, 0); } + + function test_Query_Accept_RevertWhen_SlashingOverMaxSlashPercentage_WithDelegation( + uint256 tokens, + uint256 tokensDelegated, + uint256 tokensSlash + ) public useIndexer useAllocation(tokens) useDelegation(tokensDelegated) { + uint256 maxTokensToSlash = uint256(maxSlashingPercentage).mulPPM( + _calculateStakeSnapshot(tokens, tokensDelegated) + ); + tokensSlash = bound(tokensSlash, maxTokensToSlash + 1, type(uint256).max); + + resetPrank(users.fisherman); + Attestation.Receipt memory receipt = _createAttestationReceipt(requestCID, responseCID, subgraphDeploymentId); + bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); + bytes32 disputeID = _createQueryDispute(attestationData); + + // tokensSlash exceeds the maximum slashable amount + resetPrank(users.arbitrator); + bytes memory expectedError = abi.encodeWithSelector( + IDisputeManager.DisputeManagerInvalidTokensSlash.selector, + tokensSlash, + maxTokensToSlash + ); + vm.expectRevert(expectedError); + disputeManager.acceptDispute(disputeID, tokensSlash); + } + + function test_Query_Accept_RevertWhen_SlashingOverMaxSlashPercentage_WithDelegation_DelegationSlashing( + uint256 tokens, + uint256 tokensDelegated, + uint256 tokensSlash + ) public useIndexer useAllocation(tokens) useDelegation(tokensDelegated) {
+ // enable delegation slashing + resetPrank(users.governor); + staking.setDelegationSlashingEnabled(); + + resetPrank(users.fisherman); + uint256 maxTokensToSlash = uint256(maxSlashingPercentage).mulPPM( + _calculateStakeSnapshot(tokens, tokensDelegated) + ); + tokensSlash = bound(tokensSlash, maxTokensToSlash + 1, type(uint256).max); + + // Create a new dispute with delegation slashing enabled + resetPrank(users.fisherman); + Attestation.Receipt memory receipt = _createAttestationReceipt(requestCID, responseCID, subgraphDeploymentId); + bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); + bytes32 disputeID = _createQueryDispute(attestationData); + + // tokensSlash exceeds the maximum slashable amount + resetPrank(users.arbitrator); + bytes memory expectedError = abi.encodeWithSelector( + IDisputeManager.DisputeManagerInvalidTokensSlash.selector, + tokensSlash, + maxTokensToSlash + ); + vm.expectRevert(expectedError); + disputeManager.acceptDispute(disputeID, tokensSlash); + } + + function test_Query_Accept_Dispute_AfterFishermanRewardCutIncreased( + uint256 tokens, + uint256 tokensSlash + ) public useIndexer { + vm.assume(tokens >= minimumProvisionTokens); + vm.assume(tokens < 10_000_000_000 ether); + tokensSlash = bound(tokensSlash, 1, uint256(maxSlashingPercentage).mulPPM(tokens)); + + // Set fishermanRewardCut to 25% + resetPrank(users.governor); + uint32 oldFishermanRewardCut = 250_000; + disputeManager.setFishermanRewardCut(oldFishermanRewardCut); + + // Create provision with maxVerifierCut == fishermanRewardCut and allocate + resetPrank(users.indexer); + _createProvision(users.indexer, tokens, oldFishermanRewardCut, disputePeriod); + _register(users.indexer, abi.encode("url", "geoHash", address(0))); + bytes memory data = _createSubgraphAllocationData( + users.indexer, + subgraphDeployment, + allocationIDPrivateKey, + tokens + ); + _startService(users.indexer, data); + + // Create a dispute with prov.maxVerifierCut == fishermanRewardCut + uint256 beforeFishermanBalance = token.balanceOf(users.fisherman); + resetPrank(users.fisherman); + Attestation.Receipt memory receipt = _createAttestationReceipt(requestCID, responseCID, subgraphDeploymentId); + bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); + bytes32 disputeID = _createQueryDispute(attestationData); + + // Now bump the fishermanRewardCut to 50% + resetPrank(users.governor); + disputeManager.setFishermanRewardCut(500_000); + + // Accept the dispute + resetPrank(users.arbitrator); + _acceptDispute(disputeID, tokensSlash); + + // Check that the fisherman received the correct amount of tokens, + // capped by the provision's maxVerifierCut (the old fishermanRewardCut) + uint256 afterFishermanBalance = token.balanceOf(users.fisherman); + assertEq(afterFishermanBalance, beforeFishermanBalance + tokensSlash.mulPPM(oldFishermanRewardCut)); + } } diff --git a/packages/subgraph-service/test/disputeManager/disputes/query/cancel.t.sol b/packages/subgraph-service/test/disputeManager/disputes/query/cancel.t.sol index 471d1620a..09374016a 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/query/cancel.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/query/cancel.t.sol @@ -22,6 +22,10 @@ contract DisputeManagerQueryCancelDisputeTest is DisputeManagerTest { bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); bytes32 disputeID = _createQueryDispute(attestationData); + + // skip to end of dispute period + uint256 disputePeriod =
disputeManager.disputePeriod(); + skip(disputePeriod + 1); + _cancelDispute(disputeID); } @@ -45,4 +49,43 @@ contract DisputeManagerQueryCancelDisputeTest is DisputeManagerTest { vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); disputeManager.cancelDispute(disputeID); } + + function test_Query_Cancel_After_DisputePeriodIncreased(uint256 tokens) public useIndexer useAllocation(tokens) { + resetPrank(users.fisherman); + Attestation.Receipt memory receipt = _createAttestationReceipt(requestCID, responseCID, subgraphDeploymentId); + bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); + bytes32 disputeID = _createQueryDispute(attestationData); + + // change the dispute period to a higher value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod * 2)); + + // skip to end of old dispute period + skip(oldDisputePeriod + 1); + + // should be able to cancel + resetPrank(users.fisherman); + _cancelDispute(disputeID); + } + + function test_Query_Cancel_After_DisputePeriodDecreased(uint256 tokens) public useIndexer useAllocation(tokens) { + resetPrank(users.fisherman); + Attestation.Receipt memory receipt = _createAttestationReceipt(requestCID, responseCID, subgraphDeploymentId); + bytes memory attestationData = _createAtestationData(receipt, allocationIDPrivateKey); + bytes32 disputeID = _createQueryDispute(attestationData); + + // change the dispute period to a lower value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod / 2)); + + // skip to end of new dispute period + skip(oldDisputePeriod / 2 + 1); + + // should not be able to cancel + resetPrank(users.fisherman); + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); + disputeManager.cancelDispute(disputeID); + } } diff --git a/packages/subgraph-service/test/disputeManager/disputes/query/create.t.sol b/packages/subgraph-service/test/disputeManager/disputes/query/create.t.sol index 843318995..020749a30 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/query/create.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/query/create.t.sol @@ -78,7 +78,7 @@ contract DisputeManagerQueryCreateDisputeTest is DisputeManagerTest { uint256 newAllocationIDKey = uint256(keccak256(abi.encodePacked("newAllocationID"))); mint(newIndexer, tokens); resetPrank(newIndexer); - _createProvision(newIndexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(newIndexer, tokens, fishermanRewardPercentage, disputePeriod); _register(newIndexer, abi.encode("url", "geoHash", 0x0)); bytes memory data = _createSubgraphAllocationData(newIndexer, subgraphDeployment, newAllocationIDKey, tokens); _startService(newIndexer, data); diff --git a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/accept.t.sol b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/accept.t.sol index f145277af..6f805794a 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/accept.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/accept.t.sol @@ -150,7 +150,7 @@ contract DisputeManagerQueryConflictAcceptDisputeTest is DisputeManagerTest { mint(differentIndexer, tokensSecondIndexer); uint256 differentIndexerAllocationIDPrivateKey = 
uint256(keccak256(abi.encodePacked(differentIndexer))); resetPrank(differentIndexer); - _createProvision(differentIndexer, tokensSecondIndexer, maxSlashingPercentage, disputePeriod); + _createProvision(differentIndexer, tokensSecondIndexer, fishermanRewardPercentage, disputePeriod); _register(differentIndexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _createSubgraphAllocationData( differentIndexer, diff --git a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/cancel.t.sol b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/cancel.t.sol index a368fc443..1d8d3c6eb 100644 --- a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/cancel.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/cancel.t.sol @@ -28,6 +28,10 @@ contract DisputeManagerQueryConflictCancelDisputeTest is DisputeManagerTest { resetPrank(users.fisherman); (bytes32 disputeID1, ) = _createQueryDisputeConflict(attestationData1, attestationData2); + // skip to end of dispute period + uint256 disputePeriod = disputeManager.disputePeriod(); + skip(disputePeriod + 1); + _cancelDispute(disputeID1); } @@ -70,4 +74,60 @@ contract DisputeManagerQueryConflictCancelDisputeTest is DisputeManagerTest { vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); disputeManager.cancelDispute(disputeID1); } + + function test_Query_Conflict_Cancel_After_DisputePeriodIncreased( + uint256 tokens + ) public useIndexer useAllocation(tokens) { + resetPrank(users.fisherman); + (bytes memory attestationData1, bytes memory attestationData2) = _createConflictingAttestations( + requestCID, + subgraphDeployment, + responseCID1, + responseCID2, + allocationIDPrivateKey, + allocationIDPrivateKey + ); + + resetPrank(users.fisherman); + (bytes32 disputeID1, ) = _createQueryDisputeConflict(attestationData1, attestationData2); + + // change the dispute period to a higher value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod * 2)); + + // skip to end of old dispute period + skip(oldDisputePeriod + 1); + + // should be able to cancel + resetPrank(users.fisherman); + _cancelDispute(disputeID1); + } + + function test_Query_Cancel_After_DisputePeriodDecreased(uint256 tokens) public useIndexer useAllocation(tokens) { + (bytes memory attestationData1, bytes memory attestationData2) = _createConflictingAttestations( + requestCID, + subgraphDeployment, + responseCID1, + responseCID2, + allocationIDPrivateKey, + allocationIDPrivateKey + ); + + resetPrank(users.fisherman); + (bytes32 disputeID1, ) = _createQueryDisputeConflict(attestationData1, attestationData2); + + // change the dispute period to a lower value + uint256 oldDisputePeriod = disputeManager.disputePeriod(); + resetPrank(users.governor); + disputeManager.setDisputePeriod(uint64(oldDisputePeriod / 2)); + + // skip to end of new dispute period + skip(oldDisputePeriod / 2 + 1); + + // should not be able to cancel + resetPrank(users.fisherman); + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputePeriodNotFinished.selector)); + disputeManager.cancelDispute(disputeID1); + } } diff --git a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/create.t.sol b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/create.t.sol index de08a7de4..4b185a850 100644 --- 
a/packages/subgraph-service/test/disputeManager/disputes/queryConflict/create.t.sol +++ b/packages/subgraph-service/test/disputeManager/disputes/queryConflict/create.t.sol @@ -38,7 +38,7 @@ contract DisputeManagerQueryConflictCreateDisputeTest is DisputeManagerTest { uint256 newAllocationIDKey = uint256(keccak256(abi.encodePacked("newAllocationID"))); mint(newIndexer, tokens); resetPrank(newIndexer); - _createProvision(newIndexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(newIndexer, tokens, fishermanRewardPercentage, disputePeriod); _register(newIndexer, abi.encode("url", "geoHash", 0x0)); bytes memory data = _createSubgraphAllocationData(newIndexer, subgraphDeployment, newAllocationIDKey, tokens); _startService(newIndexer, data); diff --git a/packages/subgraph-service/test/mocks/MockRewardsManager.sol b/packages/subgraph-service/test/mocks/MockRewardsManager.sol index dd94d6a2c..3279e114c 100644 --- a/packages/subgraph-service/test/mocks/MockRewardsManager.sol +++ b/packages/subgraph-service/test/mocks/MockRewardsManager.sol @@ -14,7 +14,13 @@ interface IRewardsIssuer { ) external view - returns (address indexer, bytes32 subgraphDeploymentId, uint256 tokens, uint256 accRewardsPerAllocatedToken); + returns ( + bool isActive, + address indexer, + bytes32 subgraphDeploymentId, + uint256 tokens, + uint256 accRewardsPerAllocatedToken + ); } contract MockRewardsManager is IRewardsManager { @@ -71,9 +77,12 @@ contract MockRewardsManager is IRewardsManager { function takeRewards(address _allocationID) external returns (uint256) { address rewardsIssuer = msg.sender; - (, , uint256 tokens, uint256 accRewardsPerAllocatedToken) = IRewardsIssuer(rewardsIssuer).getAllocationData( - _allocationID - ); + (bool isActive, , , uint256 tokens, uint256 accRewardsPerAllocatedToken) = IRewardsIssuer(rewardsIssuer) + .getAllocationData(_allocationID); + + if (!isActive) { + return 0; + } uint256 accRewardsPerTokens = tokens.mulPPM(rewardsPerSignal); uint256 rewards = accRewardsPerTokens - accRewardsPerAllocatedToken; diff --git a/packages/subgraph-service/test/shared/HorizonStakingShared.t.sol b/packages/subgraph-service/test/shared/HorizonStakingShared.t.sol index 66915d043..55990a2b7 100644 --- a/packages/subgraph-service/test/shared/HorizonStakingShared.t.sol +++ b/packages/subgraph-service/test/shared/HorizonStakingShared.t.sol @@ -14,6 +14,15 @@ abstract contract HorizonStakingSharedTest is SubgraphBaseTest { * HELPERS */ + function _provision( + address _indexer, + uint256 _tokens, + uint32 _maxSlashingPercentage, + uint64 _disputePeriod + ) internal { + staking.provision(_indexer, address(subgraphService), _tokens, _maxSlashingPercentage, _disputePeriod); + } + function _createProvision( address _indexer, uint256 _tokens, @@ -129,12 +138,13 @@ abstract contract HorizonStakingSharedTest is SubgraphBaseTest { vm.store(address(staking), subgraphAllocationsBaseSlot, bytes32(currentAllocatedTokens + tokens)); } - /* - * PRIVATE - */ - - function _stakeTo(address _indexer, uint256 _tokens) private { + function _stakeTo(address _indexer, uint256 _tokens) internal { token.approve(address(staking), _tokens); staking.stakeTo(_indexer, _tokens); } + + function _stake(uint256 _tokens) internal { + token.approve(address(staking), _tokens); + staking.stake(_tokens); + } } diff --git a/packages/subgraph-service/test/shared/SubgraphServiceShared.t.sol b/packages/subgraph-service/test/shared/SubgraphServiceShared.t.sol index 8a51d3443..e2cf62e83 100644 --- 
a/packages/subgraph-service/test/shared/SubgraphServiceShared.t.sol +++ b/packages/subgraph-service/test/shared/SubgraphServiceShared.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.27; import "forge-std/Test.sol"; +import { MathUtils } from "@graphprotocol/horizon/contracts/libraries/MathUtils.sol"; import { Allocation } from "../../contracts/libraries/Allocation.sol"; import { AllocationManager } from "../../contracts/utilities/AllocationManager.sol"; import { IDataService } from "@graphprotocol/horizon/contracts/data-service/interfaces/IDataService.sol"; @@ -34,7 +35,7 @@ abstract contract SubgraphServiceSharedTest is HorizonStakingSharedTest { modifier useAllocation(uint256 tokens) { vm.assume(tokens >= minimumProvisionTokens); vm.assume(tokens < 10_000_000_000 ether); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _createSubgraphAllocationData( users.indexer, @@ -145,7 +146,8 @@ abstract contract SubgraphServiceSharedTest is HorizonStakingSharedTest { _indexer, allocationId, allocation.subgraphDeploymentId, - allocation.tokens + allocation.tokens, + false ); emit IDataService.ServiceStopped(_indexer, _data); @@ -185,6 +187,10 @@ abstract contract SubgraphServiceSharedTest is HorizonStakingSharedTest { staking.delegate(users.indexer, address(subgraphService), tokens, 0); } + function _calculateStakeSnapshot(uint256 _tokens, uint256 _tokensDelegated) internal view returns (uint256) { + return _tokens + _tokensDelegated; + } + /* * PRIVATE FUNCTIONS */ diff --git a/packages/subgraph-service/test/subgraphService/SubgraphService.t.sol b/packages/subgraph-service/test/subgraphService/SubgraphService.t.sol index 2053083c8..66117c32b 100644 --- a/packages/subgraph-service/test/subgraphService/SubgraphService.t.sol +++ b/packages/subgraph-service/test/subgraphService/SubgraphService.t.sol @@ -160,7 +160,8 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { allocation.indexer, _allocationId, allocation.subgraphDeploymentId, - allocation.tokens + allocation.tokens, + true ); // close stale allocation @@ -277,10 +278,14 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { address _indexer, bytes memory _data ) private returns (uint256 paymentCollected) { - IGraphTallyCollector.SignedRAV memory signedRav = abi.decode(_data, (IGraphTallyCollector.SignedRAV)); - Allocation.State memory allocation = subgraphService.getAllocation( - address(uint160(uint256(signedRav.rav.collectionId))) + (IGraphTallyCollector.SignedRAV memory signedRav, uint256 tokensToCollect) = abi.decode( + _data, + (IGraphTallyCollector.SignedRAV, uint256) ); + address allocationId = address(uint160(uint256(signedRav.rav.collectionId))); + Allocation.State memory allocation = subgraphService.getAllocation(allocationId); + bytes32 subgraphDeploymentId = allocation.subgraphDeploymentId; + address payer = graphTallyCollector.isAuthorized(signedRav.rav.payer, _recoverRAVSigner(signedRav)) ? signedRav.rav.payer : address(0); @@ -291,14 +296,21 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { _indexer, payer ); - paymentCollected = signedRav.rav.valueAggregate - tokensCollected; + paymentCollected = tokensToCollect == 0 ? 
signedRav.rav.valueAggregate - tokensCollected : tokensToCollect; QueryFeeData memory queryFeeData = _queryFeeData(allocation.subgraphDeploymentId); uint256 tokensProtocol = paymentCollected.mulPPMRoundUp(queryFeeData.protocolPaymentCut); uint256 tokensCurators = (paymentCollected - tokensProtocol).mulPPMRoundUp(queryFeeData.curationCut); vm.expectEmit(address(subgraphService)); - emit ISubgraphService.QueryFeesCollected(_indexer, payer, paymentCollected, tokensCurators); + emit ISubgraphService.QueryFeesCollected( + _indexer, + payer, + allocationId, + subgraphDeploymentId, + paymentCollected, + tokensCurators + ); return paymentCollected; } @@ -361,7 +373,10 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { CollectPaymentData memory collectPaymentDataBefore, CollectPaymentData memory collectPaymentDataAfter ) private view { - IGraphTallyCollector.SignedRAV memory signedRav = abi.decode(_data, (IGraphTallyCollector.SignedRAV)); + (IGraphTallyCollector.SignedRAV memory signedRav, uint256 tokensToCollect) = abi.decode( + _data, + (IGraphTallyCollector.SignedRAV, uint256) + ); Allocation.State memory allocation = subgraphService.getAllocation( address(uint160(uint256(signedRav.rav.collectionId))) ); @@ -466,7 +481,7 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { resetPrank(_indexer); token.approve(address(staking), _tokens); staking.stakeTo(_indexer, _tokens); - staking.provision(_indexer, address(subgraphService), _tokens, maxSlashingPercentage, disputePeriod); + staking.provision(_indexer, address(subgraphService), _tokens, fishermanRewardPercentage, disputePeriod); _register(_indexer, abi.encode("url", "geoHash", address(0))); (address newIndexerAllocationId, uint256 newIndexerAllocationKey) = makeAddrAndKey("newIndexerAllocationId"); diff --git a/packages/subgraph-service/test/subgraphService/allocation/forceClose.t.sol b/packages/subgraph-service/test/subgraphService/allocation/forceClose.t.sol index 1ac1383ab..9ade5bb2c 100644 --- a/packages/subgraph-service/test/subgraphService/allocation/forceClose.t.sol +++ b/packages/subgraph-service/test/subgraphService/allocation/forceClose.t.sol @@ -78,7 +78,7 @@ contract SubgraphServiceAllocationForceCloseTest is SubgraphServiceTest { function test_SubgraphService_Allocation_ForceClose_RevertIf_Altruistic(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _createSubgraphAllocationData(users.indexer, subgraphDeployment, allocationIDPrivateKey, 0); diff --git a/packages/subgraph-service/test/subgraphService/allocation/overDelegated.t.sol b/packages/subgraph-service/test/subgraphService/allocation/overDelegated.t.sol index d1f2f5ff0..60149be19 100644 --- a/packages/subgraph-service/test/subgraphService/allocation/overDelegated.t.sol +++ b/packages/subgraph-service/test/subgraphService/allocation/overDelegated.t.sol @@ -30,7 +30,7 @@ contract SubgraphServiceAllocationOverDelegatedTest is SubgraphServiceTest { // Create provision token.approve(address(staking), indexerTokens); - _createProvision(users.indexer, indexerTokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, indexerTokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); // 
Delegate so that indexer is over allocated diff --git a/packages/subgraph-service/test/subgraphService/allocation/start.t.sol b/packages/subgraph-service/test/subgraphService/allocation/start.t.sol index 15e48dc66..b568bd6a3 100644 --- a/packages/subgraph-service/test/subgraphService/allocation/start.t.sol +++ b/packages/subgraph-service/test/subgraphService/allocation/start.t.sol @@ -21,7 +21,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _generateData(tokens); @@ -31,7 +31,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_AllowsZeroTokens(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _generateData(0); @@ -41,7 +41,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_ByOperator(uint256 tokens) public useOperator { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _generateData(tokens); @@ -51,7 +51,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_NotAuthorized(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); resetPrank(users.operator); @@ -79,7 +79,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_NotRegistered(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); bytes memory data = _generateData(tokens); vm.expectRevert( @@ -91,7 +91,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_ZeroAllocationId(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes32 digest = subgraphService.encodeAllocationProof(users.indexer, address(0)); @@ -104,7 +104,7 @@ contract 
SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_InvalidSignature(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); (address signer, uint256 signerPrivateKey) = makeAddrAndKey("invalidSigner"); @@ -124,7 +124,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_InvalidData(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = abi.encode(subgraphDeployment, tokens, allocationID, _generateRandomHexBytes(32)); @@ -137,7 +137,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { ) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _generateData(tokens); @@ -150,7 +150,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_AlreadyExists_Migrated(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); resetPrank(users.governor); @@ -165,7 +165,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { function test_SubgraphService_Allocation_Start_RevertWhen_AlreadyExists_Staking(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); // create dummy allo in staking contract @@ -183,7 +183,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS - 1); lockTokens = bound(lockTokens, tokens + 1, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _generateData(lockTokens); diff --git a/packages/subgraph-service/test/subgraphService/collect/indexing/indexing.t.sol b/packages/subgraph-service/test/subgraphService/collect/indexing/indexing.t.sol index a35f37ecc..4f0d6769d 100644 --- a/packages/subgraph-service/test/subgraphService/collect/indexing/indexing.t.sol +++ b/packages/subgraph-service/test/subgraphService/collect/indexing/indexing.t.sol @@ -7,7 +7,7 @@ import { 
IGraphPayments } from "@graphprotocol/horizon/contracts/interfaces/IGra import { ISubgraphService } from "../../../../contracts/interfaces/ISubgraphService.sol"; import { SubgraphServiceTest } from "../../SubgraphService.t.sol"; - +import { Allocation } from "../../../../contracts/libraries/Allocation.sol"; contract SubgraphServiceCollectIndexingTest is SubgraphServiceTest { /* * TESTS @@ -127,7 +127,7 @@ contract SubgraphServiceCollectIndexingTest is SubgraphServiceTest { tokens = bound(tokens, minimumProvisionTokens * 2, 10_000_000_000 ether); // setup allocation - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); bytes memory data = _createSubgraphAllocationData( users.indexer, diff --git a/packages/subgraph-service/test/subgraphService/collect/query/query.t.sol b/packages/subgraph-service/test/subgraphService/collect/query/query.t.sol index 28cc677cf..10068f745 100644 --- a/packages/subgraph-service/test/subgraphService/collect/query/query.t.sol +++ b/packages/subgraph-service/test/subgraphService/collect/query/query.t.sol @@ -39,7 +39,11 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { return abi.encodePacked(r, s, v); } - function _getQueryFeeEncodedData(address indexer, uint128 tokens) private view returns (bytes memory) { + function _getQueryFeeEncodedData( + address indexer, + uint128 tokens, + uint256 tokensToCollect + ) private view returns (bytes memory) { IGraphTallyCollector.ReceiptAggregateVoucher memory rav = _getRAV( indexer, bytes32(uint256(uint160(allocationID))), @@ -49,7 +53,7 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKey, messageHash); bytes memory signature = abi.encodePacked(r, s, v); IGraphTallyCollector.SignedRAV memory signedRAV = IGraphTallyCollector.SignedRAV(rav, signature); - return abi.encode(signedRAV); + return abi.encode(signedRAV, tokensToCollect); } function _getRAV( @@ -109,7 +113,7 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { _authorizeSigner(); resetPrank(users.indexer); - bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokensPayment)); + bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokensPayment), 0); _collect(users.indexer, IGraphPayments.PaymentTypes.QueryFee, data); } @@ -129,14 +133,14 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { uint256 accTokensPayment = 0; for (uint i = 0; i < numPayments; i++) { accTokensPayment = accTokensPayment + tokensPayment; - bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(accTokensPayment)); + bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(accTokensPayment), 0); _collect(users.indexer, IGraphPayments.PaymentTypes.QueryFee, data); } } function testCollect_RevertWhen_NotAuthorized(uint256 tokens) public useIndexer useAllocation(tokens) { IGraphPayments.PaymentTypes paymentType = IGraphPayments.PaymentTypes.QueryFee; - bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokens)); + bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokens), 0); resetPrank(users.operator); vm.expectRevert( abi.encodeWithSelector( @@ -157,7 +161,7 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { _createAndStartAllocation(newIndexer, tokens); // This data is for user.indexer allocationId - bytes memory 
data = _getQueryFeeEncodedData(newIndexer, uint128(tokens)); + bytes memory data = _getQueryFeeEncodedData(newIndexer, uint128(tokens), 0); resetPrank(newIndexer); vm.expectRevert( @@ -173,7 +177,7 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { // Setup new indexer address newIndexer = makeAddr("newIndexer"); _createAndStartAllocation(newIndexer, tokens); - bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokens)); + bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokens), 0); vm.expectRevert( abi.encodeWithSelector(ISubgraphService.SubgraphServiceIndexerMismatch.selector, users.indexer, newIndexer) ); @@ -193,4 +197,66 @@ contract SubgraphServiceRegisterTest is SubgraphServiceTest { ); subgraphService.collect(users.indexer, IGraphPayments.PaymentTypes.QueryFee, data); } + + function testCollect_QueryFees_PartialCollect( + uint256 tokensAllocated, + uint256 tokensPayment + ) public useIndexer useAllocation(tokensAllocated) { + vm.assume(tokensAllocated > minimumProvisionTokens * stakeToFeesRatio); + uint256 maxTokensPayment = tokensAllocated / stakeToFeesRatio > type(uint128).max + ? type(uint128).max + : tokensAllocated / stakeToFeesRatio; + tokensPayment = bound(tokensPayment, minimumProvisionTokens, maxTokensPayment); + + resetPrank(users.gateway); + _deposit(tokensPayment); + _authorizeSigner(); + + uint256 beforeGatewayBalance = escrow.getBalance(users.gateway, address(graphTallyCollector), users.indexer); + uint256 beforeTokensCollected = graphTallyCollector.tokensCollected( + address(subgraphService), + bytes32(uint256(uint160(allocationID))), + users.indexer, + users.gateway + ); + + // Collect the RAV in two steps + resetPrank(users.indexer); + uint256 tokensToCollect = tokensPayment / 2; + bool oddTokensPayment = tokensPayment % 2 == 1; + bytes memory data = _getQueryFeeEncodedData(users.indexer, uint128(tokensPayment), tokensToCollect); + _collect(users.indexer, IGraphPayments.PaymentTypes.QueryFee, data); + + uint256 intermediateGatewayBalance = escrow.getBalance( + users.gateway, + address(graphTallyCollector), + users.indexer + ); + assertEq(intermediateGatewayBalance, beforeGatewayBalance - tokensToCollect); + uint256 intermediateTokensCollected = graphTallyCollector.tokensCollected( + address(subgraphService), + bytes32(uint256(uint160(allocationID))), + users.indexer, + users.gateway + ); + assertEq(intermediateTokensCollected, beforeTokensCollected + tokensToCollect); + + bytes memory data2 = _getQueryFeeEncodedData( + users.indexer, + uint128(tokensPayment), + oddTokensPayment ? tokensToCollect + 1 : tokensToCollect + ); + _collect(users.indexer, IGraphPayments.PaymentTypes.QueryFee, data2); + + // Check the indexer received the correct amount of tokens + uint256 afterGatewayBalance = escrow.getBalance(users.gateway, address(graphTallyCollector), users.indexer); + assertEq(afterGatewayBalance, beforeGatewayBalance - tokensPayment); + uint256 afterTokensCollected = graphTallyCollector.tokensCollected( + address(subgraphService), + bytes32(uint256(uint160(allocationID))), + users.indexer, + users.gateway + ); + assertEq(afterTokensCollected, intermediateTokensCollected + tokensToCollect + (oddTokensPayment ? 
1 : 0)); + } } diff --git a/packages/subgraph-service/test/subgraphService/provider/register.t.sol b/packages/subgraph-service/test/subgraphService/provider/register.t.sol index 942f2e16a..e64baa060 100644 --- a/packages/subgraph-service/test/subgraphService/provider/register.t.sol +++ b/packages/subgraph-service/test/subgraphService/provider/register.t.sol @@ -14,7 +14,7 @@ contract SubgraphServiceProviderRegisterTest is SubgraphServiceTest { function test_SubgraphService_Provider_Register(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); bytes memory data = abi.encode("url", "geoHash", users.rewardsDestination); _register(users.indexer, data); } @@ -52,7 +52,7 @@ contract SubgraphServiceProviderRegisterTest is SubgraphServiceTest { uint256 tokens ) public useIndexer { tokens = bound(tokens, 1, minimumProvisionTokens - 1); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); vm.expectRevert( abi.encodeWithSelector( @@ -68,7 +68,7 @@ contract SubgraphServiceProviderRegisterTest is SubgraphServiceTest { function test_SubgraphService_Provider_Register_RevertIf_EmptyUrl(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); bytes memory data = abi.encode("", "geoHash", users.rewardsDestination); vm.expectRevert(abi.encodeWithSelector(ISubgraphService.SubgraphServiceEmptyUrl.selector)); subgraphService.register(users.indexer, data); @@ -76,7 +76,7 @@ contract SubgraphServiceProviderRegisterTest is SubgraphServiceTest { function test_SubgraphService_Provider_Register_RevertIf_EmptyGeohash(uint256 tokens) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - _createProvision(users.indexer, tokens, maxSlashingPercentage, disputePeriod); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); bytes memory data = abi.encode("url", "", users.rewardsDestination); vm.expectRevert(abi.encodeWithSelector(ISubgraphService.SubgraphServiceEmptyGeohash.selector)); subgraphService.register(users.indexer, data); diff --git a/packages/subgraph-service/test/subgraphService/provision/accept.t.sol b/packages/subgraph-service/test/subgraphService/provision/accept.t.sol index 4d421b979..a54ff7db2 100644 --- a/packages/subgraph-service/test/subgraphService/provision/accept.t.sol +++ b/packages/subgraph-service/test/subgraphService/provision/accept.t.sol @@ -17,14 +17,19 @@ contract SubgraphServiceProvisionAcceptTest is SubgraphServiceTest { uint256 tokens, uint32 newVerifierCut, uint64 newDisputePeriod - ) public useIndexer { + ) public { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); vm.assume(newVerifierCut >= fishermanRewardPercentage); vm.assume(newVerifierCut <= MAX_PPM); - newDisputePeriod = uint64(bound(newDisputePeriod, disputePeriod, MAX_WAIT_PERIOD)); + newDisputePeriod = uint64(bound(newDisputePeriod, 1, MAX_WAIT_PERIOD)); + + // Set the dispute period to the new value + resetPrank(users.governor); + disputeManager.setDisputePeriod(newDisputePeriod); // Setup indexer - _createProvision(users.indexer, tokens, 
fishermanRewardPercentage, disputePeriod); + resetPrank(users.indexer); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, newDisputePeriod); _register(users.indexer, abi.encode("url", "geoHash", address(0))); // Update parameters with new values @@ -34,11 +39,29 @@ contract SubgraphServiceProvisionAcceptTest is SubgraphServiceTest { _acceptProvision(users.indexer, ""); } - function test_SubgraphService_Provision_Accept_RevertWhen_NotRegistered() public useIndexer { - vm.expectRevert( - abi.encodeWithSelector(ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, users.indexer) - ); - subgraphService.acceptProvisionPendingParameters(users.indexer, ""); + function test_SubgraphService_Provision_Accept_When_NotRegistered( + uint256 tokens, + uint32 newVerifierCut, + uint64 newDisputePeriod + ) public { + tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); + vm.assume(newVerifierCut >= fishermanRewardPercentage); + vm.assume(newVerifierCut <= MAX_PPM); + newDisputePeriod = uint64(bound(newDisputePeriod, 1, MAX_WAIT_PERIOD)); + + // Set the dispute period to the new value + resetPrank(users.governor); + disputeManager.setDisputePeriod(newDisputePeriod); + + // Setup indexer but don't register + resetPrank(users.indexer); + _createProvision(users.indexer, tokens, fishermanRewardPercentage, newDisputePeriod); + + // Update parameters with new values + _setProvisionParameters(users.indexer, address(subgraphService), newVerifierCut, newDisputePeriod); + + // Accept provision and check parameters + _acceptProvision(users.indexer, ""); } function test_SubgraphService_Provision_Accept_RevertWhen_NotAuthorized() public { @@ -58,7 +81,7 @@ contract SubgraphServiceProvisionAcceptTest is SubgraphServiceTest { uint32 newVerifierCut ) public useIndexer { tokens = bound(tokens, minimumProvisionTokens, MAX_TOKENS); - vm.assume(newVerifierCut < maxSlashingPercentage); + vm.assume(newVerifierCut < fishermanRewardPercentage); // Setup indexer _createProvision(users.indexer, tokens, fishermanRewardPercentage, disputePeriod); @@ -101,7 +124,7 @@ contract SubgraphServiceProvisionAcceptTest is SubgraphServiceTest { "thawingPeriod", newDisputePeriod, disputePeriod, - type(uint64).max + disputePeriod ) ); subgraphService.acceptProvisionPendingParameters(users.indexer, ""); diff --git a/packages/subgraph-service/test/utils/Constants.sol b/packages/subgraph-service/test/utils/Constants.sol index 34be9ac74..04d64437e 100644 --- a/packages/subgraph-service/test/utils/Constants.sol +++ b/packages/subgraph-service/test/utils/Constants.sol @@ -10,7 +10,7 @@ abstract contract Constants { uint256 internal constant MIN_DISPUTE_DEPOSIT = 1 ether; // 1 GRT uint256 internal constant disputeDeposit = 100 ether; // 100 GRT uint32 internal constant fishermanRewardPercentage = 500000; // 50% - uint32 internal constant maxSlashingPercentage = 500000; // 50% + uint32 internal constant maxSlashingPercentage = 100000; // 10% // Subgraph Service uint256 internal constant minimumProvisionTokens = 1000 ether; uint256 internal constant maximumProvisionTokens = type(uint256).max;
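Note on the reward math the accept-dispute tests above assert: the fisherman's payout is no longer a flat fishermanRewardCut of the slashed tokens; the cut is capped by the provision's maxVerifierCut (see the MathUtils.min change in DisputeManager.t.sol). The following is a minimal sketch of that expectation, reusing only the MathUtils and PPMMath libraries already imported by these tests; the library and function names below are illustrative and are not part of this changeset.

// SPDX-License-Identifier: MIT
pragma solidity 0.8.27;

import { MathUtils } from "@graphprotocol/horizon/contracts/libraries/MathUtils.sol";
import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol";

/// Illustrative sketch only (not part of the changeset): mirrors the expected
/// fisherman reward asserted in the accept-dispute tests, where the configured
/// fishermanRewardCut is capped by the provision's maxVerifierCut before being
/// applied to the slashed amount.
library FishermanRewardSketch {
    using PPMMath for uint256;

    function expectedReward(
        uint256 tokensSlash,        // tokens slashed from the provision
        uint256 fishermanRewardCut, // DisputeManager parameter, in PPM
        uint256 maxVerifierCut      // provision.maxVerifierCut, in PPM
    ) internal pure returns (uint256) {
        // e.g. a 500_000 (50%) reward cut against a 250_000 (25%) maxVerifierCut
        // pays the fisherman 25% of tokensSlash, as in the tests above.
        uint256 cappedCut = MathUtils.min(fishermanRewardCut, maxVerifierCut);
        return tokensSlash.mulPPM(cappedCut);
    }
}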