diff --git a/l1-contracts/src/core/slashing/SlashingProposer.sol b/l1-contracts/src/core/slashing/SlashingProposer.sol index 143220d1df60..6cf723ad6630 100644 --- a/l1-contracts/src/core/slashing/SlashingProposer.sol +++ b/l1-contracts/src/core/slashing/SlashingProposer.sol @@ -50,12 +50,11 @@ import {SafeCast} from "@oz/utils/math/SafeCast.sol"; * * About SLASH_OFFSET_IN_ROUNDS: * - This offset gives us time to detect an offense and then vote on it in a later - * round. For instance, an `VALID_EPOCH_PRUNED` offense for epoch N is only triggered after - * `PROOF_SUBMISSION_WINDOW` epochs. Consider the following: - * - Epoch 1 is valid - * - At the end of epoch 3, the proof for 1 has not landed, so epoch 1 is pruned - * - Network decides to slash the committee of epoch 1 - * - This means that only starting from epoch 4 we should be voting for slashing the committee of epoch 1 + * round. For instance, a `DATA_WITHHOLDING` offense for slot S is only triggered after + * `DATA_WITHHOLDING_TOLERANCE_SLOTS` slots. Consider: + * - Slot S publishes a checkpoint + * - At slot S + tolerance, observers find missing data and want to slash the attesters + * - Voting on that slash needs to happen in a round that starts after detection * - In terms of voting, this parameter means that in round R we are voting for the committee of epochs starting * from (R - SLASH_OFFSET_IN_ROUNDS) * ROUND_SIZE_IN_EPOCHS. 
* - For example, with SLASH_OFFSET_IN_ROUNDS=2, ROUND_SIZE=10, and EPOCH_DURATION=2 diff --git a/spartan/aztec-node/templates/_pod-template.yaml b/spartan/aztec-node/templates/_pod-template.yaml index e395a80da696..623199606123 100644 --- a/spartan/aztec-node/templates/_pod-template.yaml +++ b/spartan/aztec-node/templates/_pod-template.yaml @@ -217,14 +217,14 @@ spec: - name: SLASH_VALIDATORS_NEVER value: {{ join "," .Values.node.slash.validatorsNever | quote }} {{- end }} - {{- if .Values.node.slash.prunePenalty }} - - name: SLASH_PRUNE_PENALTY - value: {{ .Values.node.slash.prunePenalty | quote }} - {{- end }} {{- if .Values.node.slash.dataWithholdingPenalty }} - name: SLASH_DATA_WITHHOLDING_PENALTY value: {{ .Values.node.slash.dataWithholdingPenalty | quote }} {{- end }} + {{- if .Values.node.slash.dataWithholdingToleranceSlots }} + - name: SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS + value: {{ .Values.node.slash.dataWithholdingToleranceSlots | quote }} + {{- end }} {{- if .Values.node.slash.inactivityPenalty }} - name: SLASH_INACTIVITY_PENALTY value: {{ .Values.node.slash.inactivityPenalty | quote }} diff --git a/spartan/aztec-node/values.yaml b/spartan/aztec-node/values.yaml index 30c32c5d33d2..a1219566677b 100644 --- a/spartan/aztec-node/values.yaml +++ b/spartan/aztec-node/values.yaml @@ -147,8 +147,8 @@ node: validatorsAlways: [] validatorsNever: [] # Penalty amounts for different offense types - prunePenalty: "" dataWithholdingPenalty: "" + dataWithholdingToleranceSlots: "" inactivityPenalty: "" inactivityTargetPercentage: "" invalidBlockPenalty: "" diff --git a/spartan/environments/network-defaults.yml b/spartan/environments/network-defaults.yml index 93e6a668c647..af8dd3234471 100644 --- a/spartan/environments/network-defaults.yml +++ b/spartan/environments/network-defaults.yml @@ -119,10 +119,10 @@ slasher: &slasher SLASH_MAX_PAYLOAD_SIZE: 80 # Rounds to look back when executing slashes. 
SLASH_EXECUTE_ROUNDS_LOOK_BACK: 4 - # Penalty for slashing validators of a valid pruned epoch. - SLASH_PRUNE_PENALTY: 10e18 # Penalty for data withholding. SLASH_DATA_WITHHOLDING_PENALTY: 10e18 + # Number of full L2 slots to wait after a checkpoint's slot before declaring its txs missing. + SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS: 3 # Missed attestation percentage to trigger inactivity slash (0, 1]. SLASH_INACTIVITY_TARGET_PERCENTAGE: 0.9 # Consecutive epochs a validator must be inactive before slashing. @@ -237,7 +237,6 @@ networks: PUBLIC_OTEL_EXPORTER_OTLP_METRICS_ENDPOINT: "" PUBLIC_OTEL_COLLECT_FROM: "" # Slasher penalties - SLASH_PRUNE_PENALTY: 10e18 SLASH_DATA_WITHHOLDING_PENALTY: 10e18 SLASH_INACTIVITY_TARGET_PERCENTAGE: 0.9 SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD: 1 @@ -284,7 +283,6 @@ networks: P2P_MAX_PENDING_TX_COUNT: 1000 P2P_TX_POOL_DELETE_TXS_AFTER_REORG: true # Slasher penalties - SLASH_PRUNE_PENALTY: 10e18 SLASH_DATA_WITHHOLDING_PENALTY: 10e18 SLASH_INACTIVITY_TARGET_PERCENTAGE: 0.9 SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD: 1 @@ -345,7 +343,6 @@ networks: PUBLIC_OTEL_COLLECT_FROM: "" ENABLE_VERSION_CHECK: false # Slasher penalties - more lenient initially - SLASH_PRUNE_PENALTY: 0 SLASH_DATA_WITHHOLDING_PENALTY: 0 SLASH_INACTIVITY_TARGET_PERCENTAGE: 0.8 SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD: 2 diff --git a/spartan/scripts/deploy_network.sh b/spartan/scripts/deploy_network.sh index 8b809b90261a..c7003d14e050 100755 --- a/spartan/scripts/deploy_network.sh +++ b/spartan/scripts/deploy_network.sh @@ -586,8 +586,8 @@ PROVER_PUBLISHERS_PER_PROVER = ${PUBLISHERS_PER_PROVER} SENTINEL_ENABLED = ${SENTINEL_ENABLED:-null} SLASH_INACTIVITY_TARGET_PERCENTAGE = ${SLASH_INACTIVITY_TARGET_PERCENTAGE:-null} SLASH_INACTIVITY_PENALTY = ${SLASH_INACTIVITY_PENALTY:-null} -SLASH_PRUNE_PENALTY = ${SLASH_PRUNE_PENALTY:-null} SLASH_DATA_WITHHOLDING_PENALTY = ${SLASH_DATA_WITHHOLDING_PENALTY:-null} +SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS = 
${SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS:-null} SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY = ${SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY:-null} SLASH_DUPLICATE_PROPOSAL_PENALTY = ${SLASH_DUPLICATE_PROPOSAL_PENALTY:-null} SLASH_DUPLICATE_ATTESTATION_PENALTY = ${SLASH_DUPLICATE_ATTESTATION_PENALTY:-null} diff --git a/spartan/terraform/deploy-aztec-infra/main.tf b/spartan/terraform/deploy-aztec-infra/main.tf index 52b80e924a06..159814445998 100644 --- a/spartan/terraform/deploy-aztec-infra/main.tf +++ b/spartan/terraform/deploy-aztec-infra/main.tf @@ -200,8 +200,8 @@ locals { "validator.sentinel.enabled" = var.SENTINEL_ENABLED "validator.slash.inactivityTargetPercentage" = var.SLASH_INACTIVITY_TARGET_PERCENTAGE "validator.slash.inactivityPenalty" = var.SLASH_INACTIVITY_PENALTY - "validator.slash.prunePenalty" = var.SLASH_PRUNE_PENALTY "validator.slash.dataWithholdingPenalty" = var.SLASH_DATA_WITHHOLDING_PENALTY + "validator.slash.dataWithholdingToleranceSlots" = var.SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS "validator.slash.proposeInvalidAttestationsPenalty" = var.SLASH_PROPOSE_INVALID_ATTESTATIONS_PENALTY "validator.slash.duplicateProposalPenalty" = var.SLASH_DUPLICATE_PROPOSAL_PENALTY "validator.slash.duplicateAttestationPenalty" = var.SLASH_DUPLICATE_ATTESTATION_PENALTY diff --git a/spartan/terraform/deploy-aztec-infra/variables.tf b/spartan/terraform/deploy-aztec-infra/variables.tf index 18cd6e7406e8..79d92e694cb1 100644 --- a/spartan/terraform/deploy-aztec-infra/variables.tf +++ b/spartan/terraform/deploy-aztec-infra/variables.tf @@ -466,14 +466,14 @@ variable "SLASH_INACTIVITY_PENALTY" { nullable = true } -variable "SLASH_PRUNE_PENALTY" { - description = "The slash prune penalty" +variable "SLASH_DATA_WITHHOLDING_PENALTY" { + description = "The slash data withholding penalty" type = string nullable = true } -variable "SLASH_DATA_WITHHOLDING_PENALTY" { - description = "The slash data withholding penalty" +variable "SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS" { + 
description = "L2 slots to wait after a checkpoint slot before slashing for data withholding" type = string nullable = true } diff --git a/yarn-project/aztec-node/src/aztec-node/server.ts b/yarn-project/aztec-node/src/aztec-node/server.ts index 3c4f21f7f689..95440a05ffb2 100644 --- a/yarn-project/aztec-node/src/aztec-node/server.ts +++ b/yarn-project/aztec-node/src/aztec-node/server.ts @@ -43,7 +43,7 @@ import { PublicContractsDB, PublicProcessorFactory } from '@aztec/simulator/serv import { AttestationsBlockWatcher, BroadcastedInvalidCheckpointProposalWatcher, - EpochPruneWatcher, + DataWithholdingWatcher, type SlasherClientInterface, type Watcher, createSlasher, @@ -62,7 +62,12 @@ import { type NormalizedBlockParameter, inspectBlockParameter, } from '@aztec/stdlib/block'; -import { type CheckpointData, L1PublishedData, type PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; +import { + type CheckpointData, + InMemoryCheckpointReexecutionTracker, + L1PublishedData, + type PublishedCheckpoint, +} from '@aztec/stdlib/checkpoint'; import type { ContractClassPublic, ContractDataSource, @@ -175,7 +180,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb protected readonly proverNode: ProverNode | undefined, protected readonly slasherClient: SlasherClientInterface | undefined, protected readonly validatorsSentinel: Sentinel | undefined, - protected readonly epochPruneWatcher: EpochPruneWatcher | undefined, + protected readonly dataWithholdingWatcher: DataWithholdingWatcher | undefined, protected readonly attestationsBlockWatcher: AttestationsBlockWatcher | undefined, protected readonly l1ChainId: number, protected readonly version: number, @@ -660,6 +665,9 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb let validatorClient: ValidatorClient | undefined; + // Tracks successful checkpoint re-execution by a checkpoint proposal handler. 
+ const reexecutionTracker = new InMemoryCheckpointReexecutionTracker(); + if (!config.disableValidator) { // Create validator client if required validatorClient = await createValidatorClient(config, { @@ -673,6 +681,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb l1ToL2MessageSource: archiver, keyStoreManager, blobClient, + reexecutionTracker, slashingProtectionDb: deps.slashingProtectionDb, }); @@ -709,6 +718,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb blobClient, dateProvider, telemetry, + reexecutionTracker, }).register(p2pClient, reexecute, archiver); } @@ -719,7 +729,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb await p2pClient.start(); let validatorsSentinel: Awaited> | undefined; - let epochPruneWatcher: EpochPruneWatcher | undefined; + let dataWithholdingWatcher: DataWithholdingWatcher | undefined; let attestationsBlockWatcher: AttestationsBlockWatcher | undefined; let broadcastedInvalidCheckpointProposalWatcher: BroadcastedInvalidCheckpointProposalWatcher | undefined; @@ -729,16 +739,17 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb watchers.push(validatorsSentinel); } - if (config.slashPrunePenalty > 0n || config.slashDataWithholdingPenalty > 0n) { - epochPruneWatcher = new EpochPruneWatcher( - archiver, - archiver, + if (config.slashDataWithholdingPenalty > 0n) { + dataWithholdingWatcher = new DataWithholdingWatcher( epochCache, + archiver, p2pClient.getTxProvider(), - validatorCheckpointsBuilder, + p2pClient, + reexecutionTracker, + { chainId: config.l1ChainId, rollupAddress: config.rollupAddress }, config, ); - watchers.push(epochPruneWatcher); + watchers.push(dataWithholdingWatcher); } if (config.slashBroadcastedInvalidCheckpointProposalPenalty > 0n) { @@ -765,9 +776,9 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb await validatorsSentinel.start(); 
started.push(validatorsSentinel); } - if (epochPruneWatcher) { - await epochPruneWatcher.start(); - started.push(epochPruneWatcher); + if (dataWithholdingWatcher) { + await dataWithholdingWatcher.start(); + started.push(dataWithholdingWatcher); } if (attestationsBlockWatcher) { await attestationsBlockWatcher.start(); @@ -906,7 +917,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb proverNode, slasherClient, validatorsSentinel, - epochPruneWatcher, + dataWithholdingWatcher, attestationsBlockWatcher, ethereumChain.chainInfo.id, config.rollupVersion, @@ -1185,7 +1196,7 @@ export class AztecNodeService implements AztecNode, AztecNodeAdmin, AztecNodeDeb this.log.info(`Stopping Aztec Node`); await tryStop(this.attestationsBlockWatcher); await tryStop(this.validatorsSentinel); - await tryStop(this.epochPruneWatcher); + await tryStop(this.dataWithholdingWatcher); await tryStop(this.slasherClient); await Promise.all([tryStop(this.peerProofVerifier), tryStop(this.rpcProofVerifier)]); await tryStop(this.sequencer); diff --git a/yarn-project/aztec-node/src/test/index.ts b/yarn-project/aztec-node/src/test/index.ts index be390476ef21..22e12a224cef 100644 --- a/yarn-project/aztec-node/src/test/index.ts +++ b/yarn-project/aztec-node/src/test/index.ts @@ -1,7 +1,7 @@ import type { EpochCacheInterface } from '@aztec/epoch-cache'; import type { P2P } from '@aztec/p2p'; import { SequencerClient } from '@aztec/sequencer-client'; -import { EpochPruneWatcher, type SlasherClientInterface } from '@aztec/slasher'; +import { DataWithholdingWatcher, type SlasherClientInterface } from '@aztec/slasher'; import type { L2BlockSource } from '@aztec/stdlib/block'; import type { ContractDataSource } from '@aztec/stdlib/contract'; import type { L2LogsSource, Service, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; @@ -23,7 +23,7 @@ export declare class TestAztecNodeService extends AztecNodeService { declare public sequencer: SequencerClient | 
undefined; declare public slasherClient: SlasherClientInterface | undefined; declare public validatorsSentinel: Sentinel | undefined; - declare public epochPruneWatcher: EpochPruneWatcher | undefined; + declare public dataWithholdingWatcher: DataWithholdingWatcher | undefined; declare public l1ChainId: number; declare public version: number; declare public globalVariableBuilder: GlobalVariableBuilderInterface; diff --git a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts index 808b7222a1d0..bf3bb2aac524 100644 --- a/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_p2p/data_withholding_slash.test.ts @@ -1,8 +1,9 @@ import type { AztecNodeService } from '@aztec/aztec-node'; import { waitForTx } from '@aztec/aztec.js/node'; -import { EpochNumber } from '@aztec/foundation/branded-types'; -import { times } from '@aztec/foundation/collection'; +import { BlockNumber, CheckpointNumber, EpochNumber } from '@aztec/foundation/branded-types'; +import { retryUntil } from '@aztec/foundation/retry'; import { OffenseType } from '@aztec/slasher'; +import { Tx, TxHash } from '@aztec/stdlib/tx'; import { jest } from '@jest/globals'; import fs from 'fs'; @@ -11,43 +12,56 @@ import path from 'path'; import { shouldCollectMetrics } from '../fixtures/fixtures.js'; import { createNodes } from '../fixtures/setup_p2p_test.js'; -import { P2PNetworkTest, WAIT_FOR_TX_TIMEOUT } from './p2p_network.js'; -import { awaitCommitteeExists, awaitCommitteeKicked, awaitOffenseDetected, submitTransactions } from './shared.js'; +import { P2PNetworkTest } from './p2p_network.js'; +import { awaitCommitteeExists, awaitOffenseDetected, submitTransactions } from './shared.js'; -jest.setTimeout(1000000); +const TEST_TIMEOUT = 1_000_000; +jest.setTimeout(TEST_TIMEOUT); -// Don't set this to a higher value than 9 because each node will use a different L1 publisher account 
and anvil seeds +// Don't set this above 9 — each node uses a distinct anvil seed for its publisher account. const NUM_VALIDATORS = 4; const BOOT_NODE_UDP_PORT = 4500; const COMMITTEE_SIZE = NUM_VALIDATORS; - -// This test needs longer slot window to ensure that the client has enough time to submit their txs, -// and have the nodes get recreated, prior to the reorg. -const AZTEC_SLOT_DURATION = process.env.AZTEC_SLOT_DURATION ? parseInt(process.env.AZTEC_SLOT_DURATION) : 32; +const ETHEREUM_SLOT_DURATION = 4; +const AZTEC_SLOT_DURATION = ETHEREUM_SLOT_DURATION * 3; +const TOLERANCE_SLOTS = 3; const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'data-withholding-slash-')); /** - * Demonstrate that slashing occurs when the chain is pruned, and we are unable to collect the transactions data post-hoc. - * - * The setup of the test is as follows: - * 1. Create the "initial" node, and 4 other nodes - * 2. Await the 4 other nodes to form the committee - * 3. Send a tx to the initial node - * 4. Stop all the nodes and wipe their data directories - * 5. Re-create the nodes - * 6. Expect that a slash payload is deployed with the data withholding offense + * Verifies the per-slot data-withholding slash path (A-523). * - * The reason is that with the data directories wiped, they have no way to get the original transaction data - * when the chain is pruned. So they slash themselves. + * Scenario — a realistic data-withholding attack: * + * 1. 4 validators, all in the committee. slashSelfAllowed, quorum 3. + * 2. Pick one validator to be the malicious proposer (A). Its outbound tx gossip is + * stubbed so the tx never leaves A's mempool. The tx is sent directly to A. + * 3. Two other committee members (B, C) are configured to "attest blindly" — their + * block- and checkpoint-proposal handlers are stubbed to return isValid:true without + * re-executing. They sign whatever A broadcasts. + * 4. 
The fourth committee member (D) is honest: it tries to fetch the missing tx, can't, + * and refuses to attest. + * 5. Tx-collection is also stubbed on every node so no path can pull the tx from A — + * not at proposal time, not via post-mining backfill. This simulates the data being + * genuinely unavailable to anyone except A. + * 6. A self-attests + collects B's and C's attestations → quorum 3 → publishes. + * 7. After `slashDataWithholdingToleranceSlots` full slots, the watchers on B, C, and D + * probe `getAvailableTxs` against their own mempools, find the tx missing, and emit + * a slot-keyed DATA_WITHHOLDING for the three attesters (A, B, C). + * 8. With slashSelfAllowed the offense reaches quorum; A, B, C are slashed on L1. D is + * not slashed because it never attested. */ describe('e2e_p2p_data_withholding_slash', () => { let t: P2PNetworkTest; - let nodes: AztecNodeService[]; + let nodes: AztecNodeService[] = []; const slashingUnit = BigInt(1e18); const slashingQuorum = 3; + // L1 enforces `QUORUM > ROUND_SIZE / 2`, so with quorum=3 we cap round size at 5. + // With committee 4 and only B/C/D voting (A has the tx and never detects the offense), + // a single 4-slot round only meets quorum when all three of B/C/D happen to propose + // (~23% probability). Extending slashOffenseExpirationRounds gives us several rounds to + // hit quorum before the offense expires. const slashingRoundSize = 4; const aztecEpochDuration = 2; @@ -62,17 +76,20 @@ describe('e2e_p2p_data_withholding_slash', () => { anvilSlotsInAnEpoch: 4, listenAddress: '127.0.0.1', aztecEpochDuration, - ethereumSlotDuration: 4, + ethereumSlotDuration: ETHEREUM_SLOT_DURATION, aztecSlotDuration: AZTEC_SLOT_DURATION, - aztecProofSubmissionEpochs: 0, // effectively forces instant reorgs aztecTargetCommitteeSize: COMMITTEE_SIZE, + // Long proof submission window so the legacy L1-prune path is irrelevant. 
+ aztecProofSubmissionEpochs: 1024, + slashInactivityConsecutiveEpochThreshold: 32, slashingQuorum, slashingRoundSizeInEpochs: slashingRoundSize / aztecEpochDuration, slashAmountSmall: slashingUnit, slashAmountMedium: slashingUnit * 2n, slashAmountLarge: slashingUnit * 3n, slashSelfAllowed: true, - minTxsPerBlock: 0, + slashDataWithholdingToleranceSlots: TOLERANCE_SLOTS, + minTxsPerBlock: 1, enableProposerPipelining: true, inboxLag: 2, }, @@ -83,41 +100,53 @@ describe('e2e_p2p_data_withholding_slash', () => { }); afterEach(async () => { - await t.stopNodes(nodes); + if (nodes.length > 0) { + await t.stopNodes(nodes); + } await t.teardown(); for (let i = 0; i < NUM_VALIDATORS; i++) { fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true, maxRetries: 3 }); } }); - const debugRollup = async () => { - await t.ctx.cheatCodes.rollup.debugRollup(); - }; - - it('slashes the committee when data is unavailable for the pruned epoch', async () => { + it('slashes attesters that attest to proposals containing withheld transactions', async () => { if (!t.bootstrapNodeEnr) { throw new Error('Bootstrap node ENR is not available'); } - const { rollup, slashingProposer } = await t.getContracts(); - - // Jump forward to an epoch in the future such that the validator set is not empty - await t.ctx.cheatCodes.rollup.advanceToEpoch(EpochNumber(4)); - await debugRollup(); + const { rollup } = await t.getContracts(); + + // Jump to an epoch where the validator set is non-empty. The validator set rotates per + // epoch and sometimes lands empty for early epochs, so advance epoch-by-epoch until we + // find one with a full committee. 
+ let epoch = EpochNumber(4); + await retryUntil( + async () => { + await t.ctx.cheatCodes.rollup.advanceToEpoch(epoch); + const committee = await rollup.getCurrentEpochCommittee(); + if (committee?.length === NUM_VALIDATORS) { + t.logger.warn(`Found valid committee of ${committee.length} at epoch ${epoch}`); + return true; + } + t.logger.warn(`Epoch ${epoch} has ${committee?.length ?? 0} committee members, advancing`); + epoch = EpochNumber(epoch + 1); + return false; + }, + 'epoch with full committee', + 120, + 0, + ); const [activationThreshold, ejectionThreshold, localEjectionThreshold] = await Promise.all([ rollup.getActivationThreshold(), rollup.getEjectionThreshold(), rollup.getLocalEjectionThreshold(), ]); - - // Slashing amount should be enough to kick validators out const slashingAmount = slashingUnit * 3n; const biggestEjection = ejectionThreshold > localEjectionThreshold ? ejectionThreshold : localEjectionThreshold; expect(activationThreshold - slashingAmount).toBeLessThan(biggestEjection); t.ctx.aztecNodeConfig.slashDataWithholdingPenalty = slashingAmount; - t.ctx.aztecNodeConfig.slashPrunePenalty = slashingAmount; t.ctx.aztecNodeConfig.minTxsPerBlock = 1; t.logger.warn('Creating nodes'); @@ -129,124 +158,106 @@ describe('e2e_p2p_data_withholding_slash', () => { BOOT_NODE_UDP_PORT, t.genesis, DATA_DIR, - // To collect metrics - run in aztec-packages `docker compose --profile metrics up` and set COLLECT_METRICS=true shouldCollectMetrics(), ); - // Wait for P2P mesh to be fully formed before proceeding await t.waitForP2PMeshConnectivity(nodes, NUM_VALIDATORS); - await debugRollup(); - const committee = await awaitCommitteeExists({ rollup, logger: t.logger }); - await debugRollup(); + await awaitCommitteeExists({ rollup, logger: t.logger }); - // Jump forward more time to ensure we're at the beginning of an epoch. - // This should reduce flake, since we need to have the transaction included - // and the nodes recreated, prior to the reorg. 
- // Considering the slot duration is 32 seconds, - // Considering the epoch duration is 2 slots, - // we have ~64 seconds to do this. + // The validator watchers floor processing at their boot slot. Advance past it so the tx + // checkpoint lands in a slot the watcher will actually process. await t.ctx.cheatCodes.rollup.advanceToEpoch(EpochNumber(8)); - await t.sendDummyTx(); - await debugRollup(); - - // Send L2 txs through a validator node to ensure blocks are built (needed for pruning to trigger). - t.logger.warn('Sending L2 txs through a validator node'); - const txHashes = await submitTransactions(t.logger, nodes[0], 1, t.fundedAccount); - await Promise.all(txHashes.map(txHash => waitForTx(nodes[0], txHash, { timeout: WAIT_FOR_TX_TIMEOUT }))); - t.logger.warn('L2 txs mined'); - - t.logger.warn('Stopping nodes'); - // removeInitialNode sends a dummy L1 tx and awaits its receipt to sync the - // dateProvider, so it must run while L1 mining is still active. - await t.removeInitialNode(); - - // Pause L1 block production while we tear down and recreate validators. With - // `aztecProofSubmissionEpochs=0`, epoch 8 becomes prunable as soon as epoch 9 begins - // (~32s after slot 17). The stop/wipe/recreate cycle takes longer than that, so L1 - // would otherwise race past the prune deadline before the recreated nodes come up. - // When that happens, the recreated archivers detect the prune during their initial - // sync (`handleEpochPrune` emits `L2PruneUnproven`), but the `EpochPruneWatcher` - // listener is only attached after `archiver.waitForInitialSync()` resolves - // (see `aztec-node/server.ts`), so the event is dropped and `DATA_WITHHOLDING` is - // never emitted. By freezing L1 here, the recreated archivers ingest checkpoint 1 - // cleanly during initial sync, the watcher starts and attaches its listener, and - // then we resume L1 below so the prune fires while the listener is live. 
- const ethCheatCodes = t.ctx.cheatCodes.eth; - await ethCheatCodes.setAutomine(false); - await ethCheatCodes.setIntervalMining(0); - - // Fail fast if we paused too late — i.e. if L1 already crossed into epoch 9 before - // we got here. In that case the recreated nodes would still see the prune during - // initial sync and the test would flake exactly the same way. - const epochAtPause = await rollup.getCurrentEpoch(); - expect(Number(epochAtPause)).toBeLessThan(9); - - // Now stop the validator nodes. With L1 paused, any in-flight L1 submissions from - // the validator sequencers would hang `sequencer.stop()` (it awaits pending L1 - // submissions). Since `minTxsPerBlock=1` and no txs are queued for slot 18+, the - // sequencers don't submit further L1 transactions after the slot-17 checkpoint - // (already published before `waitForTx` returned), so this is safe. - await t.stopNodes(nodes); - // And remove the data directories (which forms the crux of the "attack") - for (let i = 0; i < NUM_VALIDATORS; i++) { - fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true, maxRetries: 3 }); - } - // Re-create the nodes. - // ASSUMING they sync in the middle of the epoch, they will "see" the reorg, and try to slash. - // Reset minTxsPerBlock to 0 so re-created validators build empty checkpoints. Under proposer - // pipelining, the vote-offenses signature is bound to the target slot and the multicall is only - // delayed to the target slot start when a checkpoint is being proposed; without a proposal, - // votes would mine in the current wall-clock slot, causing the EIP-712 signature verification to fail. - t.ctx.aztecNodeConfig.minTxsPerBlock = 0; - t.logger.warn('Re-creating nodes'); - nodes = await createNodes( - t.ctx.aztecNodeConfig, - t.ctx.dateProvider, - t.bootstrapNodeEnr, - NUM_VALIDATORS, - BOOT_NODE_UDP_PORT, - t.genesis, - DATA_DIR, + // Assign roles. 
With minTxsPerBlock=1 and tx gossip suppressed on the proposer, only the + // proposer can ever build a block, so we just wait for it to be designated proposer. + const [proposerNode, blindAttester1, blindAttester2, honestNode] = nodes; + const proposerAddress = proposerNode.getSequencer()!.validatorAddresses![0]; + const blindAttester1Address = blindAttester1.getSequencer()!.validatorAddresses![0]; + const blindAttester2Address = blindAttester2.getSequencer()!.validatorAddresses![0]; + const honestAddress = honestNode.getSequencer()!.validatorAddresses![0]; + t.logger.warn( + `Proposer ${proposerAddress}, blind attesters ${blindAttester1Address}/${blindAttester2Address}, honest ${honestAddress}`, ); - // Wait for P2P mesh to be fully formed before proceeding - await t.waitForP2PMeshConnectivity(nodes, NUM_VALIDATORS); + // 1. Stub outbound tx gossip on the proposer. Tx messages going out are dropped silently; + // other gossip topics (proposals, attestations) pass through. + const proposerP2pService: any = (proposerNode as any).p2pClient.p2pService; + const originalPropagate = proposerP2pService.propagate.bind(proposerP2pService); + jest.spyOn(proposerP2pService, 'propagate').mockImplementation(((msg: any) => { + if (msg instanceof Tx) { + t.logger.info(`Suppressing outbound tx gossip from proposer ${proposerAddress}`); + return Promise.resolve(); + } + return originalPropagate(msg); + }) as any); + + // 2. Stub tx-collection on EVERY node so nothing can pull the tx back from the proposer + // over reqresp (neither at proposal time nor via post-mining backfill). + for (const node of nodes) { + const txCollection: any = (node as any).p2pClient.txCollection; + jest.spyOn(txCollection, 'collectFastFor').mockResolvedValue([]); + jest.spyOn(txCollection, 'collectFastForBlock').mockResolvedValue(undefined); + } + + // 3. Stub block- and checkpoint-proposal handling on the blind attesters so they attest + // without re-executing or fetching txs. 
+ for (const node of [blindAttester1, blindAttester2]) { + const proposalHandler: any = (node as any).validatorClient.getProposalHandler(); + jest.spyOn(proposalHandler, 'handleBlockProposal').mockImplementation((async () => { + const blockNumber = await node.getBlockNumber(); + return { isValid: true, blockNumber: BlockNumber(blockNumber + 1) }; + }) as any); + jest.spyOn(proposalHandler, 'handleCheckpointProposal').mockResolvedValue({ + isValid: true, + checkpointNumber: CheckpointNumber(1), + } as any); + } - // Resume L1 block production. Warp L1 forward to current wall-clock time so the - // epoch-8 deadline is crossed immediately on the next L1 block, then re-enable - // interval mining. By now each recreated archiver has block 1 stored locally and - // its `EpochPruneWatcher` listener is attached, so the next sync iteration emits - // `L2PruneUnproven` for epoch 8 to a live listener → `DATA_WITHHOLDING`. - const resumeTimestamp = Math.floor(t.ctx.dateProvider.now() / 1000); - await ethCheatCodes.setNextBlockTimestamp(resumeTimestamp); - await ethCheatCodes.mine(); - await ethCheatCodes.setIntervalMining(t.ctx.aztecNodeConfig.ethereumSlotDuration); + // 4. Send the tx directly to the proposer; it propagates into the local mempool and stays + // there (gossip suppressed). Combined with `minTxsPerBlock: 1`, only the proposer can + // build a block, so the tx sits in the mempool until the proposer is next selected. + t.logger.warn(`Submitting tx through proposer ${proposerAddress}`); + const [txHash] = await submitTransactions(t.logger, proposerNode, 1, t.fundedAccount); + await waitForTx(proposerNode, txHash, { timeout: AZTEC_SLOT_DURATION * 6 * 1000 }); + const checkpointSlot = await getMinedSlot(proposerNode, txHash); + t.logger.warn(`Tx ${txHash} mined at checkpoint slot ${checkpointSlot}`); + + // 5. After the tolerance window, every non-proposer's watcher should fire for the 3 + // attesters (proposer A self-signs, plus blind attesters B and C). 
+ const expectedOffendedAddresses = [proposerAddress, blindAttester1Address, blindAttester2Address] + .map(a => a.toString()) + .sort(); const offenses = await awaitOffenseDetected({ epochDuration: t.ctx.aztecNodeConfig.aztecEpochDuration, logger: t.logger, - nodeAdmin: nodes[0], + nodeAdmin: honestNode, slashingRoundSize, - waitUntilOffenseCount: COMMITTEE_SIZE, + waitUntilOffenseCount: 3, + timeoutSeconds: AZTEC_SLOT_DURATION * (TOLERANCE_SLOTS + 8), }); - // Check offenses are correct - expect(offenses.map(o => o.validator.toString()).sort()).toEqual(committee.map(a => a.toString()).sort()); - expect(offenses.map(o => o.offenseType)).toEqual(times(COMMITTEE_SIZE, () => OffenseType.DATA_WITHHOLDING)); - const offenseEpoch = Number(offenses[0].epochOrSlot); - - await awaitCommitteeKicked({ - rollup, - cheatCodes: t.ctx.cheatCodes.rollup, - committee, - slashingProposer, - slashingRoundSize, - aztecSlotDuration: AZTEC_SLOT_DURATION, - logger: t.logger, - offenseEpoch, - aztecEpochDuration, - }); + expect(offenses).toHaveLength(3); + expect(offenses.map(o => o.offenseType)).toEqual(offenses.map(() => OffenseType.DATA_WITHHOLDING)); + for (const offense of offenses) { + expect(offense.epochOrSlot).toEqual(BigInt(checkpointSlot)); + } + expect(offenses.map(o => o.validator.toString()).sort()).toEqual(expectedOffendedAddresses); + // The honest non-attester must NOT be slashed. + expect(offenses.map(o => o.validator.toString())).not.toContain(honestAddress.toString()); }); }); + +/** Returns the slot at which a tx was included, by querying the node's tx receipt. 
 */
+async function getMinedSlot(node: AztecNodeService, txHash: TxHash): Promise<number> {
+  const receipt = await node.getTxReceipt(txHash);
+  if (!receipt.blockNumber) {
+    throw new Error(`Tx ${txHash} has no block number on receipt`);
+  }
+  const block = await node.getBlock(receipt.blockNumber);
+  if (!block) {
+    throw new Error(`Block ${receipt.blockNumber} not found`);
+  }
+  return Number(block.header.getSlot());
+}
diff --git a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts
index ce4a8f706999..9c27e88a5f4f 100644
--- a/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts
+++ b/yarn-project/end-to-end/src/e2e_p2p/duplicate_attestation_slash.test.ts
@@ -213,6 +213,18 @@ describe('e2e_p2p_duplicate_attestation_slash', () => {
 
     nodes = [maliciousNode1, maliciousNode2, honestNode1, honestNode2];
 
+    // Stub the proposer's own-checkpoint-proposal loopback on the malicious nodes. The default
+    // path awaits a local handleCheckpointProposal → validateCheckpointProposal that retries
+    // until the proposed block lands in the archiver — but skipPushProposedBlocksToArchiver
+    // means it never does, so the await hangs until the retry deadline (~one slot). By the
+    // time the proposer returns from broadcast, the wallclock is in the target slot and the
+    // staleness gate refuses the self-attestation, so no duplicate attestations are ever
+    // broadcast.
+ for (const node of [maliciousNode1, maliciousNode2]) { + const p2pService: any = (node as any).p2pClient.p2pService; + jest.spyOn(p2pService, 'notifyOwnCheckpointProposal').mockResolvedValue(undefined); + } + // Wait for P2P mesh on all needed topics before starting sequencers await t.waitForP2PMeshConnectivity(nodes, NUM_VALIDATORS, 30, 0.1, [ TopicType.tx, diff --git a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts b/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts deleted file mode 100644 index c9129bf2f3d6..000000000000 --- a/yarn-project/end-to-end/src/e2e_p2p/valid_epoch_pruned_slash.test.ts +++ /dev/null @@ -1,193 +0,0 @@ -import type { AztecNodeService } from '@aztec/aztec-node'; -import { EpochNumber } from '@aztec/foundation/branded-types'; -import { times } from '@aztec/foundation/collection'; -import { sleep } from '@aztec/foundation/sleep'; -import { SpamContract } from '@aztec/noir-test-contracts.js/Spam'; -import { OffenseType } from '@aztec/slasher'; - -import { jest } from '@jest/globals'; -import fs from 'fs'; -import os from 'os'; -import path from 'path'; - -import { shouldCollectMetrics } from '../fixtures/fixtures.js'; -import { createNodes } from '../fixtures/setup_p2p_test.js'; -import { P2PNetworkTest } from './p2p_network.js'; -import { awaitCommitteeExists, awaitCommitteeKicked, awaitOffenseDetected } from './shared.js'; - -jest.setTimeout(10 * 60_000); // 10 minutes - -// Don't set this to a higher value than 9 because each node will use a different L1 publisher account and anvil seeds -const NUM_VALIDATORS = 4; -const COMMITTEE_SIZE = NUM_VALIDATORS; -const BOOT_NODE_UDP_PORT = 4500; - -const DATA_DIR = fs.mkdtempSync(path.join(os.tmpdir(), 'valid-epoch-pruned-slash-')); - -/** - * Test that we slash the committee when the pruned epoch could have been proven. 
- * We don't need to do anything special for this test other than to run it without a prover node - * (which is the default), and this will produce pruned epochs that could have been proven. But we do - * need to send a tx to make sure that the slash is due to valid epoch prune and not data withholding. - * - * TODO(palla/mbps): Add tests for 1) out messages and 2) partial epoch prunes - */ -describe('e2e_p2p_valid_epoch_pruned_slash', () => { - let t: P2PNetworkTest; - let nodes: AztecNodeService[]; - - const slashingQuorum = 3; - const slashingRoundSize = 4; - const ethereumSlotDuration = 8; - const aztecSlotDuration = 24; - const aztecEpochDuration = 2; - const initialEpoch = 8; - const slashingUnit = BigInt(1e18); - - beforeEach(async () => { - t = await P2PNetworkTest.create({ - testName: 'e2e_p2p_valid_epoch_pruned', - numberOfNodes: 0, - numberOfValidators: NUM_VALIDATORS, - basePort: BOOT_NODE_UDP_PORT, - metricsPort: shouldCollectMetrics(), - initialConfig: { - anvilSlotsInAnEpoch: 4, - enforceTimeTable: true, - cancelTxOnTimeout: false, - sequencerPublisherAllowInvalidStates: true, - listenAddress: '127.0.0.1', - aztecEpochDuration, - ethereumSlotDuration, - aztecSlotDuration, - aztecProofSubmissionEpochs: 1, - slashingQuorum, - slashingRoundSizeInEpochs: slashingRoundSize / aztecEpochDuration, - slashSelfAllowed: true, - slashGracePeriodL2Slots: initialEpoch * aztecEpochDuration, - slashAmountSmall: slashingUnit, - slashAmountMedium: slashingUnit * 2n, - slashAmountLarge: slashingUnit * 3n, - aztecTargetCommitteeSize: COMMITTEE_SIZE, - enableProposerPipelining: true, - inboxLag: 2, - }, - }); - - await t.setup(); - await t.applyBaseSetup(); - }); - - afterEach(async () => { - await t.stopNodes(nodes); - await t.teardown(); - for (let i = 0; i < NUM_VALIDATORS; i++) { - fs.rmSync(`${DATA_DIR}-${i}`, { recursive: true, force: true, maxRetries: 3 }); - } - }); - - const debugRollup = async () => { - await t.ctx.cheatCodes.rollup.debugRollup(); - }; - - 
it('slashes the committee when the pruned epoch could have been proven', async () => { - // create the bootstrap node for the network - if (!t.bootstrapNodeEnr) { - throw new Error('Bootstrap node ENR is not available'); - } - - const { rollup, slashingProposer } = await t.getContracts(); - const [activationThreshold, ejectionThreshold, localEjectionThreshold] = await Promise.all([ - rollup.getActivationThreshold(), - rollup.getEjectionThreshold(), - rollup.getLocalEjectionThreshold(), - ]); - - // Slashing amount should be enough to kick validators out - const slashingAmount = slashingUnit * 3n; - const biggestEjection = ejectionThreshold > localEjectionThreshold ? ejectionThreshold : localEjectionThreshold; - expect(activationThreshold - slashingAmount).toBeLessThan(biggestEjection); - - t.ctx.aztecNodeConfig.slashPrunePenalty = slashingAmount; - t.ctx.aztecNodeConfig.minTxsPerBlock = 1; - t.ctx.aztecNodeConfig.txPoolDeleteTxsAfterReorg = true; - - t.logger.warn(`Creating ${NUM_VALIDATORS} new nodes`); - nodes = await createNodes( - t.ctx.aztecNodeConfig, - t.ctx.dateProvider, - t.bootstrapNodeEnr, - NUM_VALIDATORS, - BOOT_NODE_UDP_PORT, - t.genesis, - DATA_DIR, - // To collect metrics - run in aztec-packages `docker compose --profile metrics up` and set COLLECT_METRICS=true - shouldCollectMetrics(), - ); - - // Wait a bit for peers to discover each other - await sleep(4000); - await debugRollup(); - - // Wait for the committee to exist - await t.ctx.cheatCodes.rollup.advanceToEpoch(EpochNumber(2)); - await t.ctx.cheatCodes.rollup.markAsProven(); - const committee = await awaitCommitteeExists({ rollup, logger: t.logger }); - await debugRollup(); - - // Set up a wallet and keep it out of reorgs - await t.ctx.cheatCodes.rollup.markAsProven(); - t.setupWalletOnNode(nodes[0]); - await t.setupAccount(); - await t.ctx.cheatCodes.rollup.markAsProven(); - - // Warp forward to after the initial grace period - expect(await 
rollup.getCurrentEpoch()).toBeLessThan(initialEpoch); - await t.ctx.cheatCodes.rollup.advanceToEpoch(EpochNumber(initialEpoch), { offset: -ethereumSlotDuration }); - await t.ctx.cheatCodes.rollup.markAsProven(); - - // Send a tx to deploy a contract so that we have a tx with public function execution in the pruned epoch - // This allows us to test that the slashed offense is valid epoch prune and not data withholding - t.logger.warn(`Submitting deployment tx to the network`); - const _spamContract = await SpamContract.deploy(t.wallet!).send({ from: t.defaultAccountAddress! }); - - // And send a tx that depends on a tx with public function execution on a contract class that will be reorged out - // This allows us to test that we handle pruned contract classes correctly - // TODO(palla/A-51): For this check to actually check what we need, we need to ensure the deployment and the - // this tx are in different blocks but within the same epoch, so it gets reexecuted by the prune-watcher. - // This does not always happen in the current test setup. - // t.logger.warn(`Submitting tx with public function execution to the network`); - // await spamContract.methods.spam(1, 1, true).send({ from: t.defaultAccountAddress! 
}); - - // Remove initial node (it's a lightweight archiver with no P2P/validator/sequencer, but clean up anyway) - t.logger.warn(`Removing initial node`); - await t.removeInitialNode(); - - // Wait for epoch to be pruned and the offense to be detected - const offenses = await awaitOffenseDetected({ - logger: t.logger, - nodeAdmin: nodes[0], - slashingRoundSize, - epochDuration: t.ctx.aztecNodeConfig.aztecEpochDuration, - waitUntilOffenseCount: COMMITTEE_SIZE, - }); - - // Check offenses are correct - expect(offenses.map(o => o.validator.toString()).sort()).toEqual(committee.map(a => a.toString()).sort()); - expect(offenses.map(o => o.offenseType)).toEqual(times(COMMITTEE_SIZE, () => OffenseType.VALID_EPOCH_PRUNED)); - const offenseEpoch = Number(offenses[0].epochOrSlot); - - // And then wait for them to be kicked out - await awaitCommitteeKicked({ - rollup, - cheatCodes: t.ctx.cheatCodes.rollup, - committee, - slashingProposer, - slashingRoundSize, - aztecSlotDuration, - logger: t.logger, - offenseEpoch, - aztecEpochDuration, - }); - }); -}); diff --git a/yarn-project/end-to-end/src/e2e_slashing/broadcasted_invalid_checkpoint_proposal_slash.test.ts b/yarn-project/end-to-end/src/e2e_slashing/broadcasted_invalid_checkpoint_proposal_slash.test.ts index 51395cbc47cb..bd9186286012 100644 --- a/yarn-project/end-to-end/src/e2e_slashing/broadcasted_invalid_checkpoint_proposal_slash.test.ts +++ b/yarn-project/end-to-end/src/e2e_slashing/broadcasted_invalid_checkpoint_proposal_slash.test.ts @@ -228,7 +228,6 @@ describe('e2e_slashing_broadcasted_invalid_checkpoint_proposal_slash', () => { slashAmountSmall: slashingUnit, slashAmountMedium: slashingUnit * 2n, slashAmountLarge: slashingUnit * 3n, - slashPrunePenalty: 0n, slashDataWithholdingPenalty: 0n, slashInactivityPenalty: 0n, slashBroadcastedInvalidBlockPenalty: 0n, diff --git a/yarn-project/foundation/src/config/env_var.ts b/yarn-project/foundation/src/config/env_var.ts index 65d020ac5f49..26b79525bfa0 100644 --- 
a/yarn-project/foundation/src/config/env_var.ts +++ b/yarn-project/foundation/src/config/env_var.ts @@ -159,7 +159,7 @@ export type EnvVar = | 'P2P_DROP_TX_CHANCE' | 'P2P_TX_POOL_DELETE_TXS_AFTER_REORG' | 'P2P_MIN_TX_POOL_AGE_MS' - | 'P2P_MISSING_TX_COLLECTION_DEADLINE_MS' + | 'P2P_MISSING_TX_COLLECTION_DEADLINE_SLOTS' | 'P2P_RPC_PRICE_BUMP_PERCENTAGE' | 'DEBUG_P2P_INSTRUMENT_MESSAGES' | 'PEER_ID_PRIVATE_KEY' @@ -239,8 +239,8 @@ export type EnvVar = | 'SEQ_SKIP_CHECKPOINT_PUBLISH_PERCENT' | 'SLASH_VALIDATORS_ALWAYS' | 'SLASH_VALIDATORS_NEVER' - | 'SLASH_PRUNE_PENALTY' | 'SLASH_DATA_WITHHOLDING_PENALTY' + | 'SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS' | 'SLASH_INACTIVITY_PENALTY' | 'SLASH_INACTIVITY_TARGET_PERCENTAGE' | 'SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD' diff --git a/yarn-project/p2p/src/client/p2p_client.test.ts b/yarn-project/p2p/src/client/p2p_client.test.ts index 115e4744585f..01c03e0d206d 100644 --- a/yarn-project/p2p/src/client/p2p_client.test.ts +++ b/yarn-project/p2p/src/client/p2p_client.test.ts @@ -49,6 +49,7 @@ describe('P2P Client', () => { epochCache = mock(); epochCache.getCurrentAndNextSlot.mockReturnValue({ currentSlot: SlotNumber(0), nextSlot: SlotNumber(1) }); epochCache.getTargetAndNextSlot.mockReturnValue({ targetSlot: SlotNumber(0), nextSlot: SlotNumber(1) }); + epochCache.getL1Constants.mockReturnValue(l1Constants); attestationPool = await createTestAttestationPool(); diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index 2816d013159e..25512eff09be 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -24,6 +24,7 @@ import { type L2TipsStore, } from '@aztec/stdlib/block'; import type { ContractDataSource } from '@aztec/stdlib/contract'; +import { getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import { type PeerInfo, tryStop } from '@aztec/stdlib/interfaces/server'; import { type BlockProposal, CheckpointAttestation, type 
CheckpointProposal, type TopicType } from '@aztec/stdlib/p2p'; import type { BlockHeader, Tx, TxHash } from '@aztec/stdlib/tx'; @@ -671,7 +672,16 @@ export class P2PClient extends WithTracer implements P2P { `Starting collection of ${missingTxHashes.length} missing txs for unproven mined block ${block.number}`, { missingTxHashes, blockNumber: block.number, blockHash: await block.hash().then(h => h.toString()) }, ); - const deadline = new Date(this._dateProvider.now() + this.config.p2pMissingTxCollectionDeadlineMs); + // Both `slashDataWithholdingToleranceSlots` and `p2pMissingTxCollectionDeadlineSlots` + // count *full slots after the block slot* — value N means collection runs until + // `slotStart(block.slot + N + 1)`. Take the larger of the two so collection never + // gives up before the data-withholding slash verdict is rendered. + const blockSlot = block.header.getSlot(); + const toleranceSlots = this.config.slashDataWithholdingToleranceSlots; + const configuredSlots = this.config.p2pMissingTxCollectionDeadlineSlots ?? 0; + const deadlineSlot = SlotNumber(blockSlot + Math.max(toleranceSlots, configuredSlots) + 1); + const deadlineSeconds = getTimestampForSlot(deadlineSlot, this.epochCache.getL1Constants()); + const deadline = new Date(Number(deadlineSeconds) * 1000); await this.txCollection.collectFastForBlock(block, missingTxHashes, { deadline }); } } diff --git a/yarn-project/p2p/src/config.ts b/yarn-project/p2p/src/config.ts index 4986a0602b13..a31e6206081f 100644 --- a/yarn-project/p2p/src/config.ts +++ b/yarn-project/p2p/src/config.ts @@ -221,8 +221,19 @@ export interface P2PConfig /** Minimum age (ms) a transaction must have been in the pool before it's eligible for block building. */ minTxPoolAgeMs: number; - /** Deadline in ms used when collecting missing txs for unproven mined blocks. 
*/ - p2pMissingTxCollectionDeadlineMs: number; + /** + * Number of full L2 slots to wait after a checkpoint's slot before declaring its txs missing + * for data-withholding slashing. + */ + slashDataWithholdingToleranceSlots: number; + + /** + * Number of L2 slots after a mined block's slot to keep collecting its missing txs. Clamped + * up so that collection always runs at least until the data-withholding slash verdict is + * rendered (`block.slot + slashDataWithholdingToleranceSlots + 1`). Defaults to undefined, + * in which case the tolerance window is used directly. + */ + p2pMissingTxCollectionDeadlineSlots?: number; /** Minimum percentage fee increase required to replace an existing tx via RPC (0 = no bump). */ priceBumpPercentage: bigint; @@ -559,10 +570,17 @@ export const p2pConfigMappings: ConfigMappingsType = { description: 'Minimum age (ms) a transaction must have been in the pool before it is eligible for block building.', ...numberConfigHelper(2_000), }, - p2pMissingTxCollectionDeadlineMs: { - env: 'P2P_MISSING_TX_COLLECTION_DEADLINE_MS', - description: 'Deadline in ms used when collecting missing txs for unproven mined blocks.', - ...numberConfigHelper(72_000), + slashDataWithholdingToleranceSlots: { + env: 'SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS', + description: + 'L2 slots to wait after a checkpoint slot before declaring its txs missing. Drives both the data-withholding slasher check and the missing-tx collection deadline.', + ...numberConfigHelper(3), + }, + p2pMissingTxCollectionDeadlineSlots: { + env: 'P2P_MISSING_TX_COLLECTION_DEADLINE_SLOTS', + description: + 'Optional deadline (in L2 slots after the block slot) for collecting missing txs for unproven mined blocks. 
Clamped up to the data-withholding tolerance window so collection never gives up before the slash verdict.',
+    ...optionalNumberConfigHelper(),
   },
   priceBumpPercentage: {
     env: 'P2P_RPC_PRICE_BUMP_PERCENTAGE',
diff --git a/yarn-project/p2p/src/services/tx_provider.ts b/yarn-project/p2p/src/services/tx_provider.ts
index 311e31162351..5004ba6eb532 100644
--- a/yarn-project/p2p/src/services/tx_provider.ts
+++ b/yarn-project/p2p/src/services/tx_provider.ts
@@ -32,6 +32,11 @@ export class TxProvider implements ITxProvider {
     this.instrumentation = new TxProviderInstrumentation(client, 'TxProvider');
   }
 
+  /** Returns whether each tx hash is currently in the local tx pool. */
+  public hasTxs(txHashes: TxHash[]): Promise<boolean[]> {
+    return this.txPool.hasTxs(txHashes);
+  }
+
   /** Returns txs from the tx pool given their hashes.*/
   public async getAvailableTxs(txHashes: TxHash[]): Promise<{ txs: Tx[]; missingTxs: TxHash[] }> {
     const response = await this.txPool.getTxsByHash(txHashes);
diff --git a/yarn-project/p2p/src/test-helpers/test_tx_provider.ts b/yarn-project/p2p/src/test-helpers/test_tx_provider.ts
index 20ae98e634f2..6e4ae3a9b91a 100644
--- a/yarn-project/p2p/src/test-helpers/test_tx_provider.ts
+++ b/yarn-project/p2p/src/test-helpers/test_tx_provider.ts
@@ -31,6 +31,11 @@ export class TestTxProvider implements ITxProvider {
     return this.getTxsByHashes(txHashes);
   }
 
+  /** Returns whether each tx hash is in the seeded collection. */
+  hasTxs(txHashes: TxHash[]): Promise<boolean[]> {
+    return Promise.resolve(txHashes.map(h => this.txs.has(h.toString())));
+  }
+
   /** Get txs for a block proposal, returning any seeded txs that match the requested hashes.
*/ getTxsForBlockProposal( blockProposal: BlockProposal, diff --git a/yarn-project/prover-client/src/orchestrator/orchestrator.ts b/yarn-project/prover-client/src/orchestrator/orchestrator.ts index 2fb617696015..19951893c916 100644 --- a/yarn-project/prover-client/src/orchestrator/orchestrator.ts +++ b/yarn-project/prover-client/src/orchestrator/orchestrator.ts @@ -89,7 +89,7 @@ export class ProvingOrchestrator extends TopTreeProvingScheduler implements Epoc protected provingPromise: Promise | undefined = undefined; private metrics: ProvingOrchestratorMetrics; - // eslint-disable-next-line aztec-custom/no-non-primitive-in-collections + private dbs: Map = new Map(); constructor( diff --git a/yarn-project/slasher/README.md b/yarn-project/slasher/README.md index e62427fab4f9..31d3a600ffbe 100644 --- a/yarn-project/slasher/README.md +++ b/yarn-project/slasher/README.md @@ -81,16 +81,10 @@ Key features: List of all slashable offenses in the system: ### DATA_WITHHOLDING -**Description**: The data required for proving an epoch was not made publicly available. -**Detection**: EpochPruneWatcher detects when an epoch cannot be proven due to missing data. -**Target**: Committee members of the affected epoch. -**Time Unit**: Epoch-based offense. - -### VALID_EPOCH_PRUNED -**Description**: An epoch was not successfully proven within the proof submission window. -**Detection**: EpochPruneWatcher monitors epochs that expire without valid proofs. -**Target**: Committee members of the unpruned epoch. -**Time Unit**: Epoch-based offense. +**Description**: The transaction data for a published checkpoint was not made available within the tolerance window. +**Detection**: DataWithholdingWatcher checks each published checkpoint's txs against the local mempool once `slashDataWithholdingToleranceSlots` full slots have elapsed past the checkpoint's slot (i.e. at `slotStart(checkpoint.slot + slashDataWithholdingToleranceSlots + 1)`). +**Target**: Validators who attested to the checkpoint. 
+**Time Unit**: Slot-based offense (the checkpoint's slot). ### INACTIVITY **Description**: A proposer failed to attest or propose blocks during their assigned slots. @@ -156,7 +150,7 @@ Considerations: - The `slashingQuorumSize` should be more than half and less than the total number of validators in a round, so that we require a majority to slash. The number of validators in a round is the committee size times the number of epochs in a round. - The bigger a `slashingRoundSizeInEpochs`, the bigger the upper bound on the quorum size. This increases security, as we need more validators to agree before slashing. However, it also makes slashing slower, and more expensive to execute in terms of gas. -- The `slashingOffsetInRounds` is required because the validators in a given slashing round must vote for _past_ offenses. Otherwise, if someone commits an offense near the end of a round, they can get away with their offense without the validators being able to collect enough votes to slash them. The offset needs to be big enough so that all offenses are discoverable, so this value should be strictly greater than the proof submission window in order to be able to slash for epoch prunes or data withholding. +- The `slashingOffsetInRounds` is required because the validators in a given slashing round must vote for _past_ offenses. Otherwise, if someone commits an offense near the end of a round, they can get away with their offense without the validators being able to collect enough votes to slash them. The offset needs to be big enough so that all offenses are discoverable, so this value should be strictly greater than the data-withholding tolerance window so that there is time to detect missing data and vote. - The `slashingExecutionDelayInRounds` allows vetoers to stop an invalid slash. This should be large enough to give vetoers time to act, but strictly smaller than the validator exit window, so an offender cannot escape before they are slashed. 
It should also be small enough so that an offender that would be kicked out does not get picked up to be a committee member again before their slash is executed. In other words, if a validator commits a serious enough offense that we want them out of the validator set as soon as possible, the execution delay should not allow them to be chosen to participate in another committee. ### Local Node Configuration (SlasherConfig) @@ -169,8 +163,8 @@ These settings are configured locally on each validator node: - `slashValidatorsNever`: Array of validator addresses that should never be slashed (own validator addresses are automatically added to this list) - `slashInactivityTargetPercentage`: Percentage of misses during an epoch to be slashed for INACTIVITY - `slashInactivityConsecutiveEpochThreshold`: How many consecutive inactive epochs are needed to trigger an INACTIVITY slash on a validator -- `slashPrunePenalty`: Penalty for VALID_EPOCH_PRUNED - `slashDataWithholdingPenalty`: Penalty for DATA_WITHHOLDING +- `slashDataWithholdingToleranceSlots`: Number of full L2 slots to wait after a checkpoint's slot before declaring its txs missing - `slashInactivityPenalty`: Penalty for INACTIVITY - `slashBroadcastedInvalidBlockPenalty`: Penalty for BROADCASTED_INVALID_BLOCK_PROPOSAL - `slashBroadcastedInvalidCheckpointProposalPenalty`: Penalty for BROADCASTED_INVALID_CHECKPOINT_PROPOSAL diff --git a/yarn-project/slasher/src/config.ts b/yarn-project/slasher/src/config.ts index 441ee79e6551..57a3d96735a7 100644 --- a/yarn-project/slasher/src/config.ts +++ b/yarn-project/slasher/src/config.ts @@ -16,8 +16,8 @@ export const DefaultSlasherConfig: SlasherConfig = { slashOverridePayload: undefined, slashValidatorsAlways: [], // Empty by default slashValidatorsNever: [], // Empty by default - slashPrunePenalty: BigInt(slasherDefaultEnv.SLASH_PRUNE_PENALTY), slashDataWithholdingPenalty: BigInt(slasherDefaultEnv.SLASH_DATA_WITHHOLDING_PENALTY), + slashDataWithholdingToleranceSlots: 
slasherDefaultEnv.SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS, slashInactivityTargetPercentage: slasherDefaultEnv.SLASH_INACTIVITY_TARGET_PERCENTAGE, slashInactivityConsecutiveEpochThreshold: slasherDefaultEnv.SLASH_INACTIVITY_CONSECUTIVE_EPOCH_THRESHOLD, slashBroadcastedInvalidBlockPenalty: BigInt(slasherDefaultEnv.SLASH_INVALID_BLOCK_PENALTY), @@ -67,16 +67,17 @@ export const slasherConfigMappings: ConfigMappingsType = { .map(addr => EthAddress.fromString(addr)), defaultValue: DefaultSlasherConfig.slashValidatorsNever, }, - slashPrunePenalty: { - env: 'SLASH_PRUNE_PENALTY', - description: 'Penalty amount for slashing validators of a valid pruned epoch (set to 0 to disable).', - ...bigintConfigHelper(DefaultSlasherConfig.slashPrunePenalty), - }, slashDataWithholdingPenalty: { env: 'SLASH_DATA_WITHHOLDING_PENALTY', description: 'Penalty amount for slashing validators for data withholding (set to 0 to disable).', ...bigintConfigHelper(DefaultSlasherConfig.slashDataWithholdingPenalty), }, + slashDataWithholdingToleranceSlots: { + env: 'SLASH_DATA_WITHHOLDING_TOLERANCE_SLOTS', + description: + 'Number of full L2 slots that must elapse after a checkpoint slot before declaring its txs missing and slashing its attesters for data withholding.', + ...numberConfigHelper(DefaultSlasherConfig.slashDataWithholdingToleranceSlots), + }, slashBroadcastedInvalidBlockPenalty: { env: 'SLASH_INVALID_BLOCK_PENALTY', description: 'Penalty amount for slashing a validator for an invalid block proposed via p2p.', diff --git a/yarn-project/slasher/src/index.ts b/yarn-project/slasher/src/index.ts index b59b375fb98f..3976ea1ae170 100644 --- a/yarn-project/slasher/src/index.ts +++ b/yarn-project/slasher/src/index.ts @@ -1,5 +1,5 @@ export * from './config.js'; -export * from './watchers/epoch_prune_watcher.js'; +export * from './watchers/data_withholding_watcher.js'; export * from './watchers/attestations_block_watcher.js'; export * from 
'./watchers/broadcasted_invalid_checkpoint_proposal_watcher.js'; export * from './slasher_client.js'; diff --git a/yarn-project/slasher/src/stores/offenses_store.test.ts b/yarn-project/slasher/src/stores/offenses_store.test.ts index 6215e541cbca..d195c9386bb1 100644 --- a/yarn-project/slasher/src/stores/offenses_store.test.ts +++ b/yarn-project/slasher/src/stores/offenses_store.test.ts @@ -107,7 +107,7 @@ describe('SlasherOffensesStore', () => { it('should handle large amounts and epoch/slot values', async () => { const largeAmount = BigInt('0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'); // Max uint128 const largeEpochOrSlot = BigInt(1_000_000_000); - const offense = createOffense(EthAddress.random(), largeAmount, OffenseType.VALID_EPOCH_PRUNED, largeEpochOrSlot); + const offense = createOffense(EthAddress.random(), largeAmount, OffenseType.INACTIVITY, largeEpochOrSlot); await store.addOffense(offense); diff --git a/yarn-project/slasher/src/watchers/data_withholding_watcher.test.ts b/yarn-project/slasher/src/watchers/data_withholding_watcher.test.ts new file mode 100644 index 000000000000..56382e71091f --- /dev/null +++ b/yarn-project/slasher/src/watchers/data_withholding_watcher.test.ts @@ -0,0 +1,296 @@ +import type { EpochCache } from '@aztec/epoch-cache'; +import { SlotNumber } from '@aztec/foundation/branded-types'; +import { EthAddress } from '@aztec/foundation/eth-address'; +import type { L2BlockSource } from '@aztec/stdlib/block'; +import type { CheckpointReexecutionTracker, PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; +import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; +import type { ITxProvider, P2PApi } from '@aztec/stdlib/interfaces/server'; +import type { CoordinationSignatureContext } from '@aztec/stdlib/p2p'; +import { OffenseType } from '@aztec/stdlib/slashing'; +import { TxHash } from '@aztec/stdlib/tx'; + +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { WANT_TO_SLASH_EVENT, type WantToSlashArgs } from 
'../watcher.js'; +import { DataWithholdingWatcher } from './data_withholding_watcher.js'; + +class TestDataWithholdingWatcher extends DataWithholdingWatcher { + public attestersBySlot = new Map(); + + protected override extractAttesters(published: PublishedCheckpoint): Promise { + return Promise.resolve(this.attestersBySlot.get(published.checkpoint.header.slotNumber) ?? []); + } +} + +describe('DataWithholdingWatcher', () => { + const TOLERANCE = 3; + const PENALTY = 1_000_000_000_000_000_000n; + const signatureContext: CoordinationSignatureContext = { + chainId: 31337, + rollupAddress: EthAddress.fromNumber(1), + }; + + let epochCache: MockProxy; + let l2BlockSource: MockProxy>; + let txProvider: MockProxy>; + let p2p: MockProxy>; + let reexecutionTracker: MockProxy>; + let watcher: TestDataWithholdingWatcher; + let l1Constants: L1RollupConstants; + + beforeEach(() => { + epochCache = mock(); + l2BlockSource = mock>(); + txProvider = mock>(); + p2p = mock>(); + p2p.getCheckpointAttestationsForSlot.mockResolvedValue([]); + reexecutionTracker = mock>(); + reexecutionTracker.hasReexecuted.mockReturnValue(false); + + l1Constants = { + l1StartBlock: 1n, + l1GenesisTime: 1_700_000_000n, + slotDuration: 24, + epochDuration: 8, + ethereumSlotDuration: 12, + proofSubmissionEpochs: 1, + targetCommitteeSize: 48, + rollupManaLimit: Number.MAX_SAFE_INTEGER, + }; + epochCache.getL1Constants.mockReturnValue(l1Constants); + + watcher = new TestDataWithholdingWatcher( + epochCache as EpochCache, + l2BlockSource, + txProvider, + p2p, + reexecutionTracker, + signatureContext, + { + slashDataWithholdingPenalty: PENALTY, + slashDataWithholdingToleranceSlots: TOLERANCE, + }, + ); + }); + + afterEach(async () => { + await watcher.stop(); + }); + + /** + * Builds a minimal published-checkpoint shape carrying just the fields the watcher reads: + * `checkpoint.{header.slotNumber, number, archive.root, blocks[*].body.txEffects[*].txHash}`. 
+ * extractAttesters is overridden in the test subclass, so attestations content does not matter. + */ + const makePublished = (slot: number, txCount: number): PublishedCheckpoint => { + const txEffects = Array.from({ length: txCount }, () => ({ txHash: TxHash.random() })); + return { + checkpoint: { + header: { slotNumber: SlotNumber(slot) }, + number: slot, + archive: { root: { toString: () => `archive-${slot}` } }, + blocks: [{ body: { txEffects } }], + }, + } as unknown as PublishedCheckpoint; + }; + + /** Configures the archiver's synced-slot mock and starts the watcher at a known initial state. */ + const startAtSlot = async (initialSlot: number) => { + l2BlockSource.getSyncedL2SlotNumber.mockResolvedValue(SlotNumber(initialSlot)); + await watcher.start(); + }; + + /** Sets the watcher's "current slot" as seen by `work()` (via the archiver's synced slot). */ + const setSyncedSlot = (slot: number) => l2BlockSource.getSyncedL2SlotNumber.mockResolvedValue(SlotNumber(slot)); + + /** Captures emitted slash args. */ + const captureEmits = (): WantToSlashArgs[][] => { + const captured: WantToSlashArgs[][] = []; + watcher.on(WANT_TO_SLASH_EVENT, args => captured.push(args)); + return captured; + }; + + /** Mocks `hasTxs` so the given hashes report as missing and all others as present. 
*/ + const mockMissing = (missingHashes: TxHash[]) => { + const missingSet = new Set(missingHashes.map(h => h.toString())); + txProvider.hasTxs.mockImplementation((hashes: TxHash[]) => + Promise.resolve(hashes.map(h => !missingSet.has(h.toString()))), + ); + }; + + it('does nothing on a tick before tolerance has elapsed', async () => { + await startAtSlot(0); + setSyncedSlot(TOLERANCE - 1); + const captured = captureEmits(); + + await watcher.work(); + + expect(l2BlockSource.getCheckpoint).not.toHaveBeenCalled(); + expect(captured).toHaveLength(0); + }); + + it('does not look back before its initial slot', async () => { + await startAtSlot(100); + // Even though current slot is well beyond initial+tolerance, we never go past the floor. + setSyncedSlot(100 + TOLERANCE); + const captured = captureEmits(); + + await watcher.work(); + + expect(l2BlockSource.getCheckpoint).not.toHaveBeenCalled(); + expect(captured).toHaveLength(0); + }); + + it('skips slots with no published checkpoint', async () => { + await startAtSlot(10); + // tolerance=3 → slot S becomes processable once currentSlot >= S + 4. + // currentSlot=17 makes the eligible window (initialSlot, currentSlot - tolerance - 1] = (10, 13]. 
+ setSyncedSlot(17); + l2BlockSource.getCheckpoint.mockResolvedValue(undefined); + const captured = captureEmits(); + + await watcher.work(); + + expect(l2BlockSource.getCheckpoint).toHaveBeenCalledWith({ slot: SlotNumber(11) }); + expect(l2BlockSource.getCheckpoint).toHaveBeenCalledWith({ slot: SlotNumber(12) }); + expect(l2BlockSource.getCheckpoint).toHaveBeenCalledWith({ slot: SlotNumber(13) }); + expect(captured).toHaveLength(0); + }); + + it('does not slash when all txs are available for a published checkpoint', async () => { + await startAtSlot(10); + setSyncedSlot(11 + TOLERANCE + 1); + + const slot = 11; + const published = makePublished(slot, 2); + l2BlockSource.getCheckpoint.mockResolvedValue(published); + mockMissing([]); + watcher.attestersBySlot.set(slot, [EthAddress.random(), EthAddress.random()]); + const captured = captureEmits(); + + await watcher.work(); + + expect(txProvider.hasTxs).toHaveBeenCalled(); + expect(captured).toHaveLength(0); + }); + + it('emits a slash for the per-checkpoint attesters when txs are missing', async () => { + await startAtSlot(10); + setSyncedSlot(11 + TOLERANCE + 1); + + const slot = 11; + const published = makePublished(slot, 3); + const missingHash = published.checkpoint.blocks[0].body.txEffects[0].txHash; + l2BlockSource.getCheckpoint.mockResolvedValue(published); + mockMissing([missingHash]); + + const attesterA = EthAddress.random(); + const attesterB = EthAddress.random(); + watcher.attestersBySlot.set(slot, [attesterA, attesterB]); + + const captured = captureEmits(); + + await watcher.work(); + + expect(captured).toHaveLength(1); + expect(captured[0]).toEqual([ + { + validator: attesterA, + amount: PENALTY, + offenseType: OffenseType.DATA_WITHHOLDING, + epochOrSlot: BigInt(slot), + }, + { + validator: attesterB, + amount: PENALTY, + offenseType: OffenseType.DATA_WITHHOLDING, + epochOrSlot: BigInt(slot), + }, + ]); + }); + + it('does not re-emit for the same slot on subsequent ticks', async () => { + await 
startAtSlot(10); + setSyncedSlot(11 + TOLERANCE + 1); + + const slot = 11; + const published = makePublished(slot, 1); + const missing = published.checkpoint.blocks[0].body.txEffects[0].txHash; + l2BlockSource.getCheckpoint.mockResolvedValue(published); + mockMissing([missing]); + watcher.attestersBySlot.set(slot, [EthAddress.random()]); + const captured = captureEmits(); + + await watcher.work(); + expect(captured).toHaveLength(1); + + // Tick again at the same currentSlot — the watcher should not re-process slot 11. + await watcher.work(); + expect(captured).toHaveLength(1); + expect(l2BlockSource.getCheckpoint).toHaveBeenCalledTimes(1); + }); + + it('respects penalty=0 as a disable switch', async () => { + watcher.updateConfig({ slashDataWithholdingPenalty: 0n }); + await startAtSlot(10); + setSyncedSlot(10 + TOLERANCE + 5); + + const captured = captureEmits(); + await watcher.work(); + + expect(l2BlockSource.getCheckpoint).not.toHaveBeenCalled(); + expect(captured).toHaveLength(0); + }); + + it('does not slash a checkpoint with no recoverable attesters even if txs are missing', async () => { + await startAtSlot(10); + setSyncedSlot(11 + TOLERANCE + 1); + + const slot = 11; + const published = makePublished(slot, 1); + const missing = published.checkpoint.blocks[0].body.txEffects[0].txHash; + l2BlockSource.getCheckpoint.mockResolvedValue(published); + mockMissing([missing]); + watcher.attestersBySlot.set(slot, []); + const captured = captureEmits(); + + await watcher.work(); + + expect(captured).toHaveLength(0); + }); + + it('sets epochOrSlot to the checkpoint slot, not its epoch (slot-keyed offense)', async () => { + await startAtSlot(0); + setSyncedSlot(1 + TOLERANCE + 1); + + const slot = 1; + const published = makePublished(slot, 1); + const missing = published.checkpoint.blocks[0].body.txEffects[0].txHash satisfies TxHash; + l2BlockSource.getCheckpoint.mockResolvedValue(published); + mockMissing([missing]); + watcher.attestersBySlot.set(slot, 
[EthAddress.random()]); + const captured = captureEmits(); + + await watcher.work(); + + expect(captured).toHaveLength(1); + expect(captured[0][0].epochOrSlot).toEqual(BigInt(slot)); + }); + + it('short-circuits when the checkpoint has already been re-executed locally', async () => { + await startAtSlot(10); + setSyncedSlot(11 + TOLERANCE + 1); + + const slot = 11; + const published = makePublished(slot, 2); + l2BlockSource.getCheckpoint.mockResolvedValue(published); + reexecutionTracker.hasReexecuted.mockReturnValue(true); + const captured = captureEmits(); + + await watcher.work(); + + expect(reexecutionTracker.hasReexecuted).toHaveBeenCalled(); + expect(txProvider.hasTxs).not.toHaveBeenCalled(); + expect(captured).toHaveLength(0); + }); +}); diff --git a/yarn-project/slasher/src/watchers/data_withholding_watcher.ts b/yarn-project/slasher/src/watchers/data_withholding_watcher.ts new file mode 100644 index 000000000000..a2eb68f22d0b --- /dev/null +++ b/yarn-project/slasher/src/watchers/data_withholding_watcher.ts @@ -0,0 +1,213 @@ +import type { EpochCache } from '@aztec/epoch-cache'; +import { CheckpointProposalHash, SlotNumber } from '@aztec/foundation/branded-types'; +import { compactArray, merge, pick } from '@aztec/foundation/collection'; +import type { EthAddress } from '@aztec/foundation/eth-address'; +import { type Logger, createLogger } from '@aztec/foundation/log'; +import { RunningPromise } from '@aztec/foundation/promise'; +import type { L2BlockSource } from '@aztec/stdlib/block'; +import { getAttestationInfoFromPublishedCheckpoint } from '@aztec/stdlib/block'; +import type { CheckpointReexecutionTracker, PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; +import type { ITxProvider, P2PApi, SlasherConfig } from '@aztec/stdlib/interfaces/server'; +import { ConsensusPayload, type CoordinationSignatureContext } from '@aztec/stdlib/p2p'; +import { OffenseType } from '@aztec/stdlib/slashing'; +import type { TxHash } from '@aztec/stdlib/tx'; + +import 
EventEmitter from 'node:events'; + +import { WANT_TO_SLASH_EVENT, type WantToSlashArgs, type Watcher, type WatcherEmitter } from '../watcher.js'; + +const DataWithholdingWatcherConfigKeys = ['slashDataWithholdingPenalty', 'slashDataWithholdingToleranceSlots'] as const; + +type DataWithholdingWatcherConfig = Pick<SlasherConfig, (typeof DataWithholdingWatcherConfigKeys)[number]>; + +/** + * Detects data-withholding offenses by probing the local mempool for the txs in published + * checkpoints once they are old enough that an honest node should have collected them. + * + * Per AZIP-7: once `slashDataWithholdingToleranceSlots` full slots have elapsed after the + * checkpoint's slot — i.e. at `slotStart(checkpoint.slot + slashDataWithholdingToleranceSlots + * + 1)` — if any tx from the checkpoint's blocks is still missing locally, the checkpoint's + * attesters are considered at fault for not making the data available, and we emit a slash + * for them. + * + * The watcher ticks at quarter-eth-slot cadence (matching the Sentinel template). On boot it + * floors processing at the current slot — restart-time gaps are accepted and not back-filled, + * matching the Sentinel approach. 
+ */ +export class DataWithholdingWatcher extends (EventEmitter as new () => WatcherEmitter) implements Watcher { + private runningPromise: RunningPromise; + private initialSlot: SlotNumber | undefined; + private lastCheckedSlot: SlotNumber | undefined; + private config: DataWithholdingWatcherConfig; + + constructor( + private readonly epochCache: EpochCache, + private readonly l2BlockSource: Pick<L2BlockSource, 'getCheckpoint' | 'getSyncedL2SlotNumber'>, + private readonly txProvider: Pick<ITxProvider, 'hasTxs'>, + private readonly p2p: Pick<P2PApi, 'getCheckpointAttestationsForSlot'>, + private readonly reexecutionTracker: Pick<CheckpointReexecutionTracker, 'hasReexecuted'>, + private readonly signatureContext: CoordinationSignatureContext, + config: DataWithholdingWatcherConfig, + private readonly log: Logger = createLogger('data-withholding-watcher'), + ) { + super(); + this.config = pick(config, ...DataWithholdingWatcherConfigKeys); + const interval = (epochCache.getL1Constants().ethereumSlotDuration * 1000) / 4; + this.runningPromise = new RunningPromise(this.work.bind(this), log, interval); + this.log.verbose(`DataWithholdingWatcher initialized`, this.config); + } + + public async start(): Promise<void> { + // Floor processing at the archiver's synced slot rather than the wallclock — restart-time + // gaps before the archiver catches up are accepted and not back-filled. Falls back to the + // wallclock if the archiver isn't ready yet (cold start). + const syncedSlot = await this.l2BlockSource.getSyncedL2SlotNumber(); + this.initialSlot = syncedSlot ?? this.epochCache.getSlotNow(); + this.log.info(`Starting data-withholding watcher with initial slot ${this.initialSlot}`); + this.runningPromise.start(); + } + + public stop(): Promise<void> { + return this.runningPromise.stop(); + } + + public updateConfig(config: Partial<SlasherConfig>): void { + this.config = merge(this.config, pick(config, ...DataWithholdingWatcherConfigKeys)); + this.log.verbose('DataWithholdingWatcher config updated', this.config); + } + + /** + * Runs every tick. 
Walks newly-eligible slots and probes their checkpoints for data + * availability; emits a DATA_WITHHOLDING slash for any checkpoint whose txs are missing. + */ + public async work(): Promise<void> { + if (this.initialSlot === undefined) { + return; + } + + if (this.config.slashDataWithholdingPenalty === 0n) { + return; // disabled + } + + // tolerance is the number of full slots that must elapse after the checkpoint's slot + // before we declare its data missing. For checkpoint slot S, we therefore process S + // only once we are in slot `S + tolerance + 1` or later. Drive this off the archiver's + // synced slot rather than the wallclock so we don't make claims about slots we haven't + // fully ingested yet (archiver may lag behind L1). + const tolerance = this.config.slashDataWithholdingToleranceSlots; + const currentSlot = (await this.l2BlockSource.getSyncedL2SlotNumber()) ?? this.epochCache.getSlotNow(); + if (currentSlot <= tolerance) { + return; + } + + const targetSlot = SlotNumber(currentSlot - tolerance - 1); + if (targetSlot <= this.initialSlot) { + return; + } + + const startSlot = this.lastCheckedSlot === undefined ? this.initialSlot : this.lastCheckedSlot; + for (let slot = SlotNumber(startSlot + 1); slot <= targetSlot; slot = SlotNumber(slot + 1)) { + try { + await this.processSlot(slot); + } catch (err) { + this.log.error(`Error processing slot ${slot} for data-withholding check`, err, { slot }); + } + this.lastCheckedSlot = slot; + } + } + + /** Probes the checkpoint at the given slot, if any, and emits a slash on missing txs. 
*/ + private async processSlot(slot: SlotNumber): Promise<void> { + const published = await this.l2BlockSource.getCheckpoint({ slot }); + if (!published) { + this.log.trace(`No published checkpoint at slot ${slot}`, { slot }); + return; + } + + const checkpointNumber = published.checkpoint.number; + const archiveRoot = published.checkpoint.archive.root; + + // Short-circuit: if we re-executed this checkpoint locally, the data was available to + // us, so there's no need to probe the mempool. + if (this.reexecutionTracker.hasReexecuted(checkpointNumber, archiveRoot)) { + this.log.trace(`Already re-executed checkpoint at slot ${slot}; skipping`, { slot, checkpointNumber }); + return; + } + + const txHashes: TxHash[] = published.checkpoint.blocks.flatMap(block => + block.body.txEffects.map(txEffect => txEffect.txHash), + ); + + if (txHashes.length === 0) { + this.log.trace(`Checkpoint at slot ${slot} has no txs`, { slot }); + return; + } + + const availability = await this.txProvider.hasTxs(txHashes); + const missingTxs = txHashes.filter((_, i) => !availability[i]); + if (missingTxs.length === 0) { + this.log.trace(`All ${txHashes.length} txs available for checkpoint at slot ${slot}`, { slot }); + return; + } + + const attesters = await this.extractAttesters(published); + + if (attesters.length === 0) { + this.log.warn(`Detected ${missingTxs.length} missing txs at slot ${slot} but no recoverable attesters`, { + slot, + missingTxs: missingTxs.map(h => h.toString()), + }); + return; + } + + this.log.warn( + `Detected data withholding at slot ${slot}: ${missingTxs.length}/${txHashes.length} txs missing. 
Slashing ${attesters.length} attesters.`, + { + slot, + missingTxs: missingTxs.map(h => h.toString()), + attesters: attesters.map(a => a.toString()), + }, + ); + + const args: WantToSlashArgs[] = attesters.map(validator => ({ + validator, + amount: this.config.slashDataWithholdingPenalty, + offenseType: OffenseType.DATA_WITHHOLDING, + epochOrSlot: BigInt(slot), + })); + this.emit(WANT_TO_SLASH_EVENT, args); + } + + /** + * Returns the union of: + * 1. attesters whose signatures landed in the published checkpoint on L1, and + * 2. attesters we observed signing the same proposal on p2p (the proposer publishes as + * soon as it has hit committee quorum, so honest peer attestations that arrive after + * that point are dropped — but they still vouched for the data and + * should be slashed for withholding it). + * + * + * Exposed as protected so tests can substitute a deterministic recovery without having + * to construct real secp256k1 signatures. + */ + protected async extractAttesters(published: PublishedCheckpoint): Promise<EthAddress[]> { + const fromL1 = getAttestationInfoFromPublishedCheckpoint(published, this.signatureContext) + .filter(info => info.status === 'recovered-from-signature') + .map(info => info.address); + + const slot = published.checkpoint.header.slotNumber; + const proposalPayloadHash = CheckpointProposalHash.fromBuffer( + ConsensusPayload.fromCheckpoint(published.checkpoint, this.signatureContext).getPayloadHash(), + ); + const fromP2p = await this.p2p + .getCheckpointAttestationsForSlot(slot, proposalPayloadHash) + .then(attestations => attestations.map(a => a.getSender())); + + // Dedupe + const all = new Map<string, EthAddress>(); + for (const addr of compactArray([...fromL1, ...fromP2p])) { + all.set(addr.toString(), addr); + } + return [...all.values()]; + } +} diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts deleted file mode 100644 index cca7caf88caa..000000000000 --- 
a/yarn-project/slasher/src/watchers/epoch_prune_watcher.test.ts +++ /dev/null @@ -1,260 +0,0 @@ -import type { EpochCache } from '@aztec/epoch-cache'; -import { BlockNumber, CheckpointNumber, EpochNumber, SlotNumber } from '@aztec/foundation/branded-types'; -import { EthAddress } from '@aztec/foundation/eth-address'; -import { sleep } from '@aztec/foundation/sleep'; -import { L2Block, type L2BlockSourceEventEmitter, L2BlockSourceEvents } from '@aztec/stdlib/block'; -import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; -import type { - ICheckpointBlockBuilder, - ICheckpointsBuilder, - ITxProvider, - MerkleTreeWriteOperations, -} from '@aztec/stdlib/interfaces/server'; -import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; -import { OffenseType } from '@aztec/stdlib/slashing'; -import { Tx } from '@aztec/stdlib/tx'; - -import { jest } from '@jest/globals'; -import { type MockProxy, mock } from 'jest-mock-extended'; -import EventEmitter from 'node:events'; -import type { Hex } from 'viem'; - -import { WANT_TO_SLASH_EVENT, type WantToSlashArgs } from '../watcher.js'; -import { EpochPruneWatcher } from './epoch_prune_watcher.js'; - -describe('EpochPruneWatcher', () => { - let watcher: EpochPruneWatcher; - let l2BlockSource: L2BlockSourceEventEmitter; - let l1ToL2MessageSource: MockProxy; - let epochCache: MockProxy; - let txProvider: MockProxy>; - let checkpointsBuilder: MockProxy; - let checkpointBuilder: MockProxy; - let fork: MockProxy; - - let ts: bigint; - let l1Constants: L1RollupConstants; - - const validEpochPrunedPenalty = BigInt(1000000000000000000n); - const dataWithholdingPenalty = BigInt(2000000000000000000n); - - beforeEach(async () => { - l2BlockSource = new MockL2BlockSource() as unknown as L2BlockSourceEventEmitter; - l1ToL2MessageSource = mock(); - l1ToL2MessageSource.getL1ToL2Messages.mockResolvedValue([]); - epochCache = mock(); - txProvider = mock>(); - checkpointsBuilder = mock(); - checkpointBuilder = mock(); - fork 
= mock(); - checkpointsBuilder.getFork.mockResolvedValue(fork); - checkpointsBuilder.startCheckpoint.mockResolvedValue(checkpointBuilder); - - ts = BigInt(Math.ceil(Date.now() / 1000)); - l1Constants = { - l1StartBlock: 1n, - l1GenesisTime: ts, - slotDuration: 24, - epochDuration: 8, - ethereumSlotDuration: 12, - proofSubmissionEpochs: 1, - targetCommitteeSize: 48, - rollupManaLimit: Number.MAX_SAFE_INTEGER, - }; - - epochCache.getL1Constants.mockReturnValue(l1Constants); - - watcher = new EpochPruneWatcher(l2BlockSource, l1ToL2MessageSource, epochCache, txProvider, checkpointsBuilder, { - slashPrunePenalty: validEpochPrunedPenalty, - slashDataWithholdingPenalty: dataWithholdingPenalty, - }); - await watcher.start(); - }); - - afterEach(async () => { - await watcher.stop(); - }); - - it('should emit WANT_TO_SLASH_EVENT when a validator is in a pruned epoch when data is unavailable', async () => { - const emitSpy = jest.spyOn(watcher, 'emit'); - const epochNumber = EpochNumber(1); - const checkpointNumber = CheckpointNumber(1); - - const block = await L2Block.random( - BlockNumber(12), // block number - { - txsPerBlock: 4, - slotNumber: SlotNumber(10), - checkpointNumber, - }, - ); - txProvider.getAvailableTxs.mockResolvedValue({ txs: [], missingTxs: [block.body.txEffects[0].txHash] }); - - const committee: Hex[] = [ - '0x0000000000000000000000000000000000000abc', - '0x0000000000000000000000000000000000000def', - ]; - epochCache.getCommitteeForEpoch.mockResolvedValue({ - committee: committee.map(EthAddress.fromString), - seed: 0n, - epoch: epochNumber, - isEscapeHatchOpen: false, - }); - - l2BlockSource.events.emit(L2BlockSourceEvents.L2PruneUnproven, { - epochNumber: EpochNumber(1), - blocks: [block], - type: L2BlockSourceEvents.L2PruneUnproven, - }); - - // Just need to yield to the event loop to clear our synchronous promises - await sleep(0); - - expect(emitSpy).toHaveBeenCalledWith(WANT_TO_SLASH_EVENT, [ - { - validator: EthAddress.fromString(committee[0]), - 
amount: dataWithholdingPenalty, - offenseType: OffenseType.DATA_WITHHOLDING, - epochOrSlot: BigInt(epochNumber), - }, - { - validator: EthAddress.fromString(committee[1]), - amount: dataWithholdingPenalty, - offenseType: OffenseType.DATA_WITHHOLDING, - epochOrSlot: BigInt(epochNumber), - }, - ] satisfies WantToSlashArgs[]); - }); - - it('should slash if the data is available and the epoch could have been proven', async () => { - const emitSpy = jest.spyOn(watcher, 'emit'); - const checkpointNumber = CheckpointNumber(1); - - const block = await L2Block.random( - BlockNumber(12), // block number - { - txsPerBlock: 4, - slotNumber: SlotNumber(10), - checkpointNumber, - }, - ); - const tx = Tx.random(); - txProvider.getAvailableTxs.mockResolvedValue({ txs: [tx], missingTxs: [] }); - checkpointBuilder.buildBlock.mockResolvedValue({ - block: block, - failedTxs: [], - numTxs: 1, - } as any); - - const committee: Hex[] = [ - '0x0000000000000000000000000000000000000abc', - '0x0000000000000000000000000000000000000def', - ]; - epochCache.getCommitteeForEpoch.mockResolvedValue({ - committee: committee.map(EthAddress.fromString), - seed: 0n, - epoch: EpochNumber(1), - isEscapeHatchOpen: false, - }); - - l2BlockSource.events.emit(L2BlockSourceEvents.L2PruneUnproven, { - epochNumber: EpochNumber(1), - blocks: [block], - type: L2BlockSourceEvents.L2PruneUnproven, - }); - - // Just need to yield to the event loop to clear our synchronous promises - await sleep(0); - - expect(emitSpy).toHaveBeenCalledWith(WANT_TO_SLASH_EVENT, [ - { - validator: EthAddress.fromString(committee[0]), - amount: validEpochPrunedPenalty, - offenseType: OffenseType.VALID_EPOCH_PRUNED, - epochOrSlot: 1n, - }, - { - validator: EthAddress.fromString(committee[1]), - amount: validEpochPrunedPenalty, - offenseType: OffenseType.VALID_EPOCH_PRUNED, - epochOrSlot: 1n, - }, - ] satisfies WantToSlashArgs[]); - - expect(checkpointsBuilder.startCheckpoint).toHaveBeenCalled(); - 
expect(checkpointBuilder.buildBlock).toHaveBeenCalledWith( - [tx], - block.header.globalVariables.blockNumber, - block.header.globalVariables.timestamp, - { isBuildingProposal: false, minValidTxs: 0 }, - ); - }); - - it('should not slash if the data is available but the epoch could not have been proven', async () => { - const emitSpy = jest.spyOn(watcher, 'emit'); - const checkpointNumber = CheckpointNumber(1); - - const blockFromL1 = await L2Block.random( - BlockNumber(12), // block number - { - txsPerBlock: 1, - slotNumber: SlotNumber(10), - checkpointNumber, - }, - ); - - const blockFromBuilder = await L2Block.random( - BlockNumber(13), // block number - { - txsPerBlock: 1, - slotNumber: SlotNumber(10), - checkpointNumber, - }, - ); - const tx = Tx.random(); - txProvider.getAvailableTxs.mockResolvedValue({ txs: [tx], missingTxs: [] }); - checkpointBuilder.buildBlock.mockResolvedValue({ - block: blockFromBuilder, - failedTxs: [], - numTxs: 1, - } as any); - - const committee: Hex[] = [ - '0x0000000000000000000000000000000000000abc', - '0x0000000000000000000000000000000000000def', - ]; - epochCache.getCommitteeForEpoch.mockResolvedValue({ - committee: committee.map(EthAddress.fromString), - seed: 0n, - epoch: EpochNumber(1), - isEscapeHatchOpen: false, - }); - - l2BlockSource.events.emit(L2BlockSourceEvents.L2PruneUnproven, { - epochNumber: EpochNumber(1), - blocks: [blockFromL1], - type: L2BlockSourceEvents.L2PruneUnproven, - }); - - // Just need to yield to the event loop to clear our synchronous promises - await sleep(0); - - expect(emitSpy).not.toHaveBeenCalled(); - - expect(checkpointsBuilder.startCheckpoint).toHaveBeenCalled(); - expect(checkpointBuilder.buildBlock).toHaveBeenCalledWith( - [tx], - blockFromL1.header.globalVariables.blockNumber, - blockFromL1.header.globalVariables.timestamp, - { isBuildingProposal: false, minValidTxs: 0 }, - ); - }); -}); - -class MockL2BlockSource { - public readonly events = new EventEmitter(); - public getCheckpoints = () 
=> []; - public getCheckpointsData = () => []; - - constructor() {} -} diff --git a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts b/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts deleted file mode 100644 index bfbed6d1f552..000000000000 --- a/yarn-project/slasher/src/watchers/epoch_prune_watcher.ts +++ /dev/null @@ -1,256 +0,0 @@ -import { EpochCache } from '@aztec/epoch-cache'; -import { BlockNumber, EpochNumber } from '@aztec/foundation/branded-types'; -import { chunkBy, merge, pick } from '@aztec/foundation/collection'; -import type { Fr } from '@aztec/foundation/curves/bn254'; -import { type Logger, createLogger } from '@aztec/foundation/log'; -import { - EthAddress, - L2Block, - type L2BlockSourceEventEmitter, - L2BlockSourceEvents, - type L2PruneUnprovenEvent, -} from '@aztec/stdlib/block'; -import { getEpochAtSlot } from '@aztec/stdlib/epoch-helpers'; -import type { - ICheckpointBlockBuilder, - ICheckpointsBuilder, - ITxProvider, - MerkleTreeWriteOperations, - SlasherConfig, -} from '@aztec/stdlib/interfaces/server'; -import { type L1ToL2MessageSource, computeCheckpointOutHash } from '@aztec/stdlib/messaging'; -import { OffenseType, getOffenseTypeName } from '@aztec/stdlib/slashing'; -import type { CheckpointGlobalVariables } from '@aztec/stdlib/tx'; -import { - ReExFailedTxsError, - ReExStateMismatchError, - TransactionsNotAvailableError, - ValidatorError, -} from '@aztec/stdlib/validators'; - -import EventEmitter from 'node:events'; - -import { WANT_TO_SLASH_EVENT, type WantToSlashArgs, type Watcher, type WatcherEmitter } from '../watcher.js'; - -const EpochPruneWatcherPenaltiesConfigKeys = ['slashPrunePenalty', 'slashDataWithholdingPenalty'] as const; - -type EpochPruneWatcherPenalties = Pick; - -/** - * This watcher is responsible for detecting chain prunes and creating slashing arguments for the committee. 
- * It only wants to slash if: - * - the transactions are not available - * - OR the archive roots match when re-building all the blocks in the epoch (i.e. the epoch *could* have been proven) - */ -export class EpochPruneWatcher extends (EventEmitter as new () => WatcherEmitter) implements Watcher { - private log: Logger = createLogger('epoch-prune-watcher'); - - // Store bound function reference for proper listener removal - private boundHandlePruneL2Blocks = this.handlePruneL2Blocks.bind(this); - - private penalties: EpochPruneWatcherPenalties; - - constructor( - private l2BlockSource: L2BlockSourceEventEmitter, - private l1ToL2MessageSource: L1ToL2MessageSource, - private epochCache: EpochCache, - private txProvider: Pick, - private checkpointsBuilder: ICheckpointsBuilder, - penalties: EpochPruneWatcherPenalties, - ) { - super(); - this.penalties = pick(penalties, ...EpochPruneWatcherPenaltiesConfigKeys); - this.log.verbose( - `EpochPruneWatcher initialized with penalties: valid epoch pruned=${penalties.slashPrunePenalty} data withholding=${penalties.slashDataWithholdingPenalty}`, - ); - } - - public start() { - this.l2BlockSource.events.on(L2BlockSourceEvents.L2PruneUnproven, this.boundHandlePruneL2Blocks); - return Promise.resolve(); - } - - public stop() { - this.l2BlockSource.events.removeListener(L2BlockSourceEvents.L2PruneUnproven, this.boundHandlePruneL2Blocks); - return Promise.resolve(); - } - - public updateConfig(config: Partial): void { - this.penalties = merge(this.penalties, pick(config, ...EpochPruneWatcherPenaltiesConfigKeys)); - this.log.verbose('EpochPruneWatcher config updated', this.penalties); - } - - private handlePruneL2Blocks(event: L2PruneUnprovenEvent): void { - const { blocks, epochNumber } = event; - void this.processPruneL2Blocks(blocks, epochNumber).catch(err => - this.log.error('Error processing pruned L2 blocks', err, { epochNumber }), - ); - } - - private async emitSlashForEpoch(offense: OffenseType, epochNumber: EpochNumber): 
Promise { - const validators = await this.getValidatorsForEpoch(epochNumber); - if (validators.length === 0) { - this.log.warn(`No validators found for epoch ${epochNumber} (cannot slash for ${getOffenseTypeName(offense)})`); - return; - } - const args = this.validatorsToSlashingArgs(validators, offense, epochNumber); - this.log.verbose(`Created slash for ${getOffenseTypeName(offense)} at epoch ${epochNumber}`, args); - this.emit(WANT_TO_SLASH_EVENT, args); - } - - private async processPruneL2Blocks(blocks: L2Block[], epochNumber: EpochNumber): Promise { - try { - const l1Constants = this.epochCache.getL1Constants(); - const epochBlocks = blocks.filter(b => getEpochAtSlot(b.header.getSlot(), l1Constants) === epochNumber); - this.log.info( - `Detected chain prune. Validating epoch ${epochNumber} with blocks ${epochBlocks[0]?.number} to ${epochBlocks[epochBlocks.length - 1]?.number}.`, - { blocks: epochBlocks.map(b => b.toBlockInfo()) }, - ); - - await this.validateBlocks(epochBlocks, epochNumber); - this.log.info(`Pruned epoch ${epochNumber} was valid. Want to slash committee for not having it proven.`); - await this.emitSlashForEpoch(OffenseType.VALID_EPOCH_PRUNED, epochNumber); - } catch (error) { - if (error instanceof TransactionsNotAvailableError) { - this.log.info(`Data for pruned epoch ${epochNumber} was not available. Will want to slash.`, { - message: error.message, - }); - await this.emitSlashForEpoch(OffenseType.DATA_WITHHOLDING, epochNumber); - } else { - this.log.error(`Error while validating pruned epoch ${epochNumber}. 
Will not want to slash.`, error); - } - } - } - - public async validateBlocks(blocks: L2Block[], epochNumber: EpochNumber): Promise { - if (blocks.length === 0) { - return; - } - - // Sort blocks by block number and group by checkpoint - const sortedBlocks = [...blocks].sort((a, b) => a.number - b.number); - const blocksByCheckpoint = chunkBy(sortedBlocks, b => b.checkpointNumber); - - // Get prior checkpoints in the epoch (in case this was a partial prune) to extract the out hashes - const priorCheckpointOutHashes = (await this.l2BlockSource.getCheckpointsData({ epoch: epochNumber })) - .filter(c => c.checkpointNumber < sortedBlocks[0].checkpointNumber) - .map(c => c.checkpointOutHash); - let previousCheckpointOutHashes: Fr[] = [...priorCheckpointOutHashes]; - - const fork = await this.checkpointsBuilder.getFork( - BlockNumber(sortedBlocks[0].header.globalVariables.blockNumber - 1), - ); - try { - for (const checkpointBlocks of blocksByCheckpoint) { - await this.validateCheckpoint(checkpointBlocks, previousCheckpointOutHashes, fork); - - // Compute checkpoint out hash from all blocks in this checkpoint - const checkpointOutHash = computeCheckpointOutHash( - checkpointBlocks.map(b => b.body.txEffects.map(tx => tx.l2ToL1Msgs)), - ); - previousCheckpointOutHashes = [...previousCheckpointOutHashes, checkpointOutHash]; - } - } finally { - await fork.close(); - } - } - - private async validateCheckpoint( - checkpointBlocks: L2Block[], - previousCheckpointOutHashes: Fr[], - fork: MerkleTreeWriteOperations, - ): Promise { - const checkpointNumber = checkpointBlocks[0].checkpointNumber; - this.log.debug(`Validating pruned checkpoint ${checkpointNumber} with ${checkpointBlocks.length} blocks`); - - // Get L1ToL2Messages once for the entire checkpoint - const l1ToL2Messages = await this.l1ToL2MessageSource.getL1ToL2Messages(checkpointNumber); - - // Build checkpoint constants from first block's global variables - const gv = checkpointBlocks[0].header.globalVariables; - const 
constants: CheckpointGlobalVariables = { - chainId: gv.chainId, - version: gv.version, - slotNumber: gv.slotNumber, - timestamp: gv.timestamp, - coinbase: gv.coinbase, - feeRecipient: gv.feeRecipient, - gasFees: gv.gasFees, - }; - - // Start checkpoint builder once for all blocks in this checkpoint - const checkpointBuilder = await this.checkpointsBuilder.startCheckpoint( - checkpointNumber, - constants, - 0n, // feeAssetPriceModifier is not used for validation of the checkpoint content - l1ToL2Messages, - previousCheckpointOutHashes, - fork, - this.log.getBindings(), - ); - - // Validate all blocks in the checkpoint sequentially - for (const block of checkpointBlocks) { - await this.validateBlockInCheckpoint(block, checkpointBuilder); - } - } - - private async validateBlockInCheckpoint( - blockFromL1: L2Block, - checkpointBuilder: ICheckpointBlockBuilder, - ): Promise { - this.log.debug(`Validating pruned block ${blockFromL1.header.globalVariables.blockNumber}`); - const txHashes = blockFromL1.body.txEffects.map(txEffect => txEffect.txHash); - // We load txs from the mempool directly, since the TxCollector running in the background has already been - // trying to fetch them from nodes or via reqresp. If we haven't managed to collect them by now, - // it's likely that they are not available in the network at all. - const { txs, missingTxs } = await this.txProvider.getAvailableTxs(txHashes); - - if (missingTxs && missingTxs.length > 0) { - throw new TransactionsNotAvailableError(missingTxs); - } - - const gv = blockFromL1.header.globalVariables; - const { block, failedTxs, numTxs } = await checkpointBuilder.buildBlock(txs, gv.blockNumber, gv.timestamp, { - isBuildingProposal: false, - minValidTxs: 0, - }); - - if (numTxs !== txs.length) { - // This should be detected by state mismatch, but this makes it easier to debug. 
- throw new ValidatorError(`Built block with ${numTxs} txs, expected ${txs.length}`); - } - if (failedTxs.length > 0) { - throw new ReExFailedTxsError(failedTxs.length); - } - if (!block.archive.root.equals(blockFromL1.archive.root)) { - throw new ReExStateMismatchError(blockFromL1.archive.root, block.archive.root); - } - } - - private async getValidatorsForEpoch(epochNumber: EpochNumber): Promise { - const { committee } = await this.epochCache.getCommitteeForEpoch(epochNumber); - if (!committee) { - this.log.trace(`No committee found for epoch ${epochNumber}`); - return []; - } - return committee; - } - - private validatorsToSlashingArgs( - validators: EthAddress[], - offenseType: OffenseType, - epochOrSlot: EpochNumber, - ): WantToSlashArgs[] { - const penalty = - offenseType === OffenseType.DATA_WITHHOLDING - ? this.penalties.slashDataWithholdingPenalty - : this.penalties.slashPrunePenalty; - return validators.map(v => ({ - validator: v, - amount: penalty, - offenseType, - epochOrSlot: BigInt(epochOrSlot), - })); - } -} diff --git a/yarn-project/stdlib/src/checkpoint/checkpoint_reexecution_tracker.ts b/yarn-project/stdlib/src/checkpoint/checkpoint_reexecution_tracker.ts new file mode 100644 index 000000000000..0f97d103be2e --- /dev/null +++ b/yarn-project/stdlib/src/checkpoint/checkpoint_reexecution_tracker.ts @@ -0,0 +1,49 @@ +import type { CheckpointNumber } from '@aztec/foundation/branded-types'; +import type { Fr } from '@aztec/foundation/curves/bn254'; + +/** + * Tracks checkpoints we have successfully re-executed locally. + * + * Entries are keyed by (checkpoint number, archive root) so two competing checkpoints at + * the same number (e.g. equivocation) are tracked independently. + */ +export interface CheckpointReexecutionTracker { + /** Record a successful re-execution for the given (checkpoint number, archive root). 
*/ + recordReexecuted(checkpointNumber: CheckpointNumber, archiveRoot: Fr): void; + + /** Returns true if the given (checkpoint number, archive root) has been re-executed locally. */ + hasReexecuted(checkpointNumber: CheckpointNumber, archiveRoot: Fr): boolean; + + /** Drops entries for checkpoints with `number < checkpointNumber`. */ + removeBefore(checkpointNumber: CheckpointNumber): void; +} + +/** + * In-memory tracker backed by a per-checkpoint map of archive-root strings. Cleanup is + * driven externally via `removeBefore` (typically by the proposal handler, once a + * checkpoint reaches L1 finality). + */ +export class InMemoryCheckpointReexecutionTracker implements CheckpointReexecutionTracker { + private readonly entries = new Map<CheckpointNumber, Set<string>>(); + + public recordReexecuted(checkpointNumber: CheckpointNumber, archiveRoot: Fr): void { + let set = this.entries.get(checkpointNumber); + if (!set) { + set = new Set<string>(); + this.entries.set(checkpointNumber, set); + } + set.add(archiveRoot.toString()); + } + + public hasReexecuted(checkpointNumber: CheckpointNumber, archiveRoot: Fr): boolean { + return this.entries.get(checkpointNumber)?.has(archiveRoot.toString()) ??
false; + } + + public removeBefore(checkpointNumber: CheckpointNumber): void { + for (const n of this.entries.keys()) { + if (n < checkpointNumber) { + this.entries.delete(n); + } + } + } +} diff --git a/yarn-project/stdlib/src/checkpoint/index.ts b/yarn-project/stdlib/src/checkpoint/index.ts index 33dec7639e8c..cff0a7d04b2d 100644 --- a/yarn-project/stdlib/src/checkpoint/index.ts +++ b/yarn-project/stdlib/src/checkpoint/index.ts @@ -1,6 +1,7 @@ export * from './checkpoint.js'; export * from './checkpoint_data.js'; export * from './checkpoint_info.js'; +export * from './checkpoint_reexecution_tracker.js'; export * from './digest.js'; export * from './previous_checkpoint_out_hashes.js'; export * from './published_checkpoint.js'; diff --git a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts index 43de98324029..d79f7e11deea 100644 --- a/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts +++ b/yarn-project/stdlib/src/interfaces/aztec-node-admin.test.ts @@ -107,8 +107,8 @@ class MockAztecNodeAdmin implements AztecNodeAdmin { slashAmountLarge: 2000n, slashValidatorsAlways: [], slashValidatorsNever: [], - slashPrunePenalty: 1000n, slashDataWithholdingPenalty: 1000n, + slashDataWithholdingToleranceSlots: 3, slashInactivityTargetPercentage: 0.5, slashInactivityConsecutiveEpochThreshold: 1, slashInactivityPenalty: 1000n, diff --git a/yarn-project/stdlib/src/interfaces/slasher.ts b/yarn-project/stdlib/src/interfaces/slasher.ts index 0dc264a0c183..1de9b9da5de6 100644 --- a/yarn-project/stdlib/src/interfaces/slasher.ts +++ b/yarn-project/stdlib/src/interfaces/slasher.ts @@ -10,8 +10,13 @@ export interface SlasherConfig { slashValidatorsNever: EthAddress[]; // Array of validator addresses slashInactivityTargetPercentage: number; // 0-1, 0.9 means 90%. 
Must be greater than 0 slashInactivityConsecutiveEpochThreshold: number; // Number of consecutive epochs a validator must be inactive before slashing - slashPrunePenalty: bigint; slashDataWithholdingPenalty: bigint; + /** + * Number of full L2 slots that must elapse after a checkpoint's slot before declaring its + * txs missing and slashing the checkpoint's attesters for data withholding. With tolerance + * = N and checkpoint slot S, the check fires at the start of slot `S + N + 1`. + */ + slashDataWithholdingToleranceSlots: number; slashInactivityPenalty: bigint; slashBroadcastedInvalidBlockPenalty: bigint; slashBroadcastedInvalidCheckpointProposalPenalty: bigint; @@ -32,8 +37,8 @@ export const SlasherConfigSchema = zodFor()( slashOverridePayload: schemas.EthAddress.optional(), slashValidatorsAlways: z.array(schemas.EthAddress), slashValidatorsNever: z.array(schemas.EthAddress), - slashPrunePenalty: schemas.BigInt, slashDataWithholdingPenalty: schemas.BigInt, + slashDataWithholdingToleranceSlots: z.number(), slashInactivityTargetPercentage: z.number(), slashInactivityConsecutiveEpochThreshold: z.number(), slashInactivityPenalty: schemas.BigInt, diff --git a/yarn-project/stdlib/src/interfaces/tx_provider.ts b/yarn-project/stdlib/src/interfaces/tx_provider.ts index 4113b69cae5f..b434b8052c42 100644 --- a/yarn-project/stdlib/src/interfaces/tx_provider.ts +++ b/yarn-project/stdlib/src/interfaces/tx_provider.ts @@ -7,6 +7,12 @@ import type { PeerId } from '@libp2p/interface'; export interface ITxProvider { getAvailableTxs(txHashes: TxHash[]): Promise<{ txs: Tx[]; missingTxs: TxHash[] }>; + /** + * Checks whether each tx hash is currently held by the local tx pool. Returns a parallel + * boolean array (one entry per input hash). Does not fetch from the network. 
+ */ + hasTxs(txHashes: TxHash[]): Promise; + getTxsForBlockProposal( blockProposal: BlockProposal, blockNumber: number, diff --git a/yarn-project/stdlib/src/slashing/helpers.test.ts b/yarn-project/stdlib/src/slashing/helpers.test.ts index 98588f87d9f1..c0386967028a 100644 --- a/yarn-project/stdlib/src/slashing/helpers.test.ts +++ b/yarn-project/stdlib/src/slashing/helpers.test.ts @@ -178,7 +178,7 @@ describe('SlashingHelpers', () => { it('handles epoch-based offense that spans multiple rounds', () => { const offense = { epochOrSlot: 2n, // epoch 2 = slot 8 - offenseType: OffenseType.DATA_WITHHOLDING, + offenseType: OffenseType.INACTIVITY, }; const round = getRoundForOffense(offense, constants); expect(round).toEqual(0n); // slot 8 / roundSize 10 = round 0 @@ -187,7 +187,7 @@ describe('SlashingHelpers', () => { it('handles epoch-based offense when round is multiple of epoch duration', () => { const offense = { epochOrSlot: 2n, // epoch 2 = slot 8 - offenseType: OffenseType.DATA_WITHHOLDING, + offenseType: OffenseType.INACTIVITY, }; const round = getRoundForOffense(offense, { ...constants, slashingRoundSize: 8 }); expect(round).toEqual(1n); // slot 8 / roundSize 8 = round 1 @@ -203,7 +203,6 @@ describe('SlashingHelpers', () => { slashDuplicateProposalPenalty: 3n, slashDuplicateAttestationPenalty: 4n, slashAttestInvalidCheckpointProposalPenalty: 5n, - slashPrunePenalty: 6n, slashDataWithholdingPenalty: 7n, slashUnknownPenalty: 8n, slashInactivityPenalty: 9n, @@ -221,7 +220,6 @@ describe('SlashingHelpers', () => { slashDuplicateProposalPenalty: 3n, slashDuplicateAttestationPenalty: 4n, slashAttestInvalidCheckpointProposalPenalty: 5n, - slashPrunePenalty: 6n, slashDataWithholdingPenalty: 7n, slashUnknownPenalty: 8n, slashInactivityPenalty: 9n, diff --git a/yarn-project/stdlib/src/slashing/helpers.ts b/yarn-project/stdlib/src/slashing/helpers.ts index 135c0d247c29..b3150e1f4c54 100644 --- a/yarn-project/stdlib/src/slashing/helpers.ts +++ 
b/yarn-project/stdlib/src/slashing/helpers.ts @@ -54,7 +54,6 @@ export function getPenaltyForOffense( | 'slashDuplicateProposalPenalty' | 'slashDuplicateAttestationPenalty' | 'slashAttestInvalidCheckpointProposalPenalty' - | 'slashPrunePenalty' | 'slashDataWithholdingPenalty' | 'slashUnknownPenalty' | 'slashInactivityPenalty' @@ -62,8 +61,6 @@ export function getPenaltyForOffense( >, ) { switch (offense) { - case OffenseType.VALID_EPOCH_PRUNED: - return config.slashPrunePenalty; case OffenseType.DATA_WITHHOLDING: return config.slashDataWithholdingPenalty; case OffenseType.INACTIVITY: @@ -97,6 +94,7 @@ export function getTimeUnitForOffense(offense: OffenseType): 'epoch' | 'slot' { switch (offense) { case OffenseType.ATTESTED_DESCENDANT_OF_INVALID: case OffenseType.BROADCASTED_INVALID_BLOCK_PROPOSAL: + case OffenseType.DATA_WITHHOLDING: case OffenseType.BROADCASTED_INVALID_CHECKPOINT_PROPOSAL: case OffenseType.DUPLICATE_PROPOSAL: case OffenseType.DUPLICATE_ATTESTATION: @@ -105,9 +103,7 @@ export function getTimeUnitForOffense(offense: OffenseType): 'epoch' | 'slot' { case OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS: return 'slot'; case OffenseType.INACTIVITY: - case OffenseType.DATA_WITHHOLDING: case OffenseType.UNKNOWN: - case OffenseType.VALID_EPOCH_PRUNED: return 'epoch'; default: { const _exhaustiveCheck: never = offense; diff --git a/yarn-project/stdlib/src/slashing/serialization.test.ts b/yarn-project/stdlib/src/slashing/serialization.test.ts index 9c84b476a351..91de7e0b0108 100644 --- a/yarn-project/stdlib/src/slashing/serialization.test.ts +++ b/yarn-project/stdlib/src/slashing/serialization.test.ts @@ -92,7 +92,7 @@ describe('slashing/serialization', () => { const validator2 = EthAddress.fromString('0x2222222222222222222222222222222222222222'); const offense1 = createOffense(validator1, 500n, OffenseType.DATA_WITHHOLDING, 25n); - const offense2 = createOffense(validator2, 750n, OffenseType.VALID_EPOCH_PRUNED, 30n); + const offense2 = 
createOffense(validator2, 750n, OffenseType.INACTIVITY, 30n); const serialized1 = serializeOffense(offense1); const deserialized1 = deserializeOffense(serialized1); @@ -107,7 +107,7 @@ describe('slashing/serialization', () => { expect(deserialized2.validator).toEqual(validator2); expect(deserialized2.amount).toEqual(750n); - expect(deserialized2.offenseType).toEqual(OffenseType.VALID_EPOCH_PRUNED); + expect(deserialized2.offenseType).toEqual(OffenseType.INACTIVITY); expect(deserialized2.epochOrSlot).toEqual(30n); // Ensure they produce different serialized data @@ -160,7 +160,7 @@ describe('slashing/serialization', () => { const epochOffenses = [ OffenseType.INACTIVITY, OffenseType.DATA_WITHHOLDING, - OffenseType.VALID_EPOCH_PRUNED, + OffenseType.INACTIVITY, OffenseType.UNKNOWN, ]; diff --git a/yarn-project/stdlib/src/slashing/types.ts b/yarn-project/stdlib/src/slashing/types.ts index 531489009866..9ec628814496 100644 --- a/yarn-project/stdlib/src/slashing/types.ts +++ b/yarn-project/stdlib/src/slashing/types.ts @@ -6,10 +6,8 @@ import { schemas, zodFor } from '../schemas/index.js'; export enum OffenseType { UNKNOWN = 0, - /** The data for proving an epoch was not publicly available, we slash its committee */ + /** The data for the txs in a published checkpoint was not available within the tolerance window, we slash the checkpoint's attesters */ DATA_WITHHOLDING = 1, - /** An epoch was not successfully proven in time, we slash its committee */ - VALID_EPOCH_PRUNED = 2, /** A proposer failed to attest or propose during an epoch according to the Sentinel */ INACTIVITY = 3, /** A proposer sent an invalid block proposal over the p2p network to the committee */ @@ -36,8 +34,6 @@ export function getOffenseTypeName(offense: OffenseType) { return 'unknown'; case OffenseType.DATA_WITHHOLDING: return 'data_withholding'; - case OffenseType.VALID_EPOCH_PRUNED: - return 'valid_epoch_pruned'; case OffenseType.INACTIVITY: return 'inactivity'; case 
OffenseType.BROADCASTED_INVALID_BLOCK_PROPOSAL: @@ -66,7 +62,6 @@ export const OffenseTypeSchema = z.nativeEnum(OffenseType); export const OffenseToBigInt: Record = { [OffenseType.UNKNOWN]: 0n, [OffenseType.DATA_WITHHOLDING]: 1n, - [OffenseType.VALID_EPOCH_PRUNED]: 2n, [OffenseType.INACTIVITY]: 3n, [OffenseType.BROADCASTED_INVALID_BLOCK_PROPOSAL]: 4n, [OffenseType.PROPOSED_INSUFFICIENT_ATTESTATIONS]: 5n, @@ -84,8 +79,6 @@ export function bigIntToOffense(offense: bigint): OffenseType { return OffenseType.UNKNOWN; case 1n: return OffenseType.DATA_WITHHOLDING; - case 2n: - return OffenseType.VALID_EPOCH_PRUNED; case 3n: return OffenseType.INACTIVITY; case 4n: diff --git a/yarn-project/stdlib/src/slashing/votes.test.ts b/yarn-project/stdlib/src/slashing/votes.test.ts index 17cad2d71862..9cd770f16ca8 100644 --- a/yarn-project/stdlib/src/slashing/votes.test.ts +++ b/yarn-project/stdlib/src/slashing/votes.test.ts @@ -275,13 +275,13 @@ describe('SlashingHelpers', () => { { validator: mockValidator1, amount: 7n, - offenseType: OffenseType.DATA_WITHHOLDING, + offenseType: OffenseType.INACTIVITY, epochOrSlot: 3n, }, { validator: mockValidator1, amount: 5n, - offenseType: OffenseType.VALID_EPOCH_PRUNED, + offenseType: OffenseType.INACTIVITY, epochOrSlot: 3n, }, ]; @@ -530,10 +530,10 @@ describe('SlashingHelpers', () => { // Truncation must cut one validator (not one offense record). 
const offenses: Offense[] = [ { validator: mockValidator1, amount: 15n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, - { validator: mockValidator1, amount: 8n, offenseType: OffenseType.DATA_WITHHOLDING, epochOrSlot: 5n }, - { validator: mockValidator1, amount: 5n, offenseType: OffenseType.VALID_EPOCH_PRUNED, epochOrSlot: 5n }, + { validator: mockValidator1, amount: 8n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, + { validator: mockValidator1, amount: 5n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, { validator: mockValidator2, amount: 20n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, - { validator: mockValidator2, amount: 5n, offenseType: OffenseType.DATA_WITHHOLDING, epochOrSlot: 5n }, + { validator: mockValidator2, amount: 5n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, { validator: mockValidator3, amount: 10n, offenseType: OffenseType.INACTIVITY, epochOrSlot: 5n }, ]; diff --git a/yarn-project/validator-client/src/factory.ts b/yarn-project/validator-client/src/factory.ts index 9917d528ca9f..02325c937789 100644 --- a/yarn-project/validator-client/src/factory.ts +++ b/yarn-project/validator-client/src/factory.ts @@ -4,6 +4,7 @@ import type { DateProvider } from '@aztec/foundation/timer'; import type { KeystoreManager } from '@aztec/node-keystore'; import { BlockProposalValidator, type P2PClient } from '@aztec/p2p'; import type { L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; +import type { CheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import type { ValidatorClientFullConfig, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; import type { TelemetryClient } from '@aztec/telemetry-client'; @@ -26,6 +27,7 @@ export function createProposalHandler( blobClient: BlobClientInterface; dateProvider: DateProvider; telemetry: TelemetryClient; + reexecutionTracker: CheckpointReexecutionTracker; }, ) { const 
metrics = new ValidatorMetrics(deps.telemetry); @@ -48,6 +50,7 @@ export function createProposalHandler( deps.epochCache, config, deps.blobClient, + deps.reexecutionTracker, metrics, deps.dateProvider, deps.telemetry, @@ -68,6 +71,7 @@ export function createValidatorClient( epochCache: EpochCache; keyStoreManager: KeystoreManager | undefined; blobClient: BlobClientInterface; + reexecutionTracker: CheckpointReexecutionTracker; slashingProtectionDb?: SlashingProtectionDatabase; }, ) { @@ -87,6 +91,7 @@ export function createValidatorClient( txProvider, deps.keyStoreManager, deps.blobClient, + deps.reexecutionTracker, deps.dateProvider, deps.telemetry, deps.slashingProtectionDb, diff --git a/yarn-project/validator-client/src/proposal_handler.test.ts b/yarn-project/validator-client/src/proposal_handler.test.ts index b464dd7cd7a9..211f5b5e2893 100644 --- a/yarn-project/validator-client/src/proposal_handler.test.ts +++ b/yarn-project/validator-client/src/proposal_handler.test.ts @@ -9,7 +9,7 @@ import { type FieldsOf, unfreeze } from '@aztec/foundation/types'; import type { P2P } from '@aztec/p2p'; import type { BlockProposalValidator } from '@aztec/p2p/msg_validators'; import type { BlockData, L2Block, L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; -import type { Checkpoint } from '@aztec/stdlib/checkpoint'; +import { type Checkpoint, InMemoryCheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import type { ITxProvider, ValidatorClientFullConfig, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; @@ -98,6 +98,7 @@ describe('ProposalHandler checkpoint validation', () => { epochCache, config, mock(), + new InMemoryCheckpointReexecutionTracker(), metrics, dateProvider, ); @@ -167,6 +168,7 @@ describe('ProposalHandler checkpoint validation', () => { epochCache, config, mock(), + new 
InMemoryCheckpointReexecutionTracker(), metrics, dateProvider, ); diff --git a/yarn-project/validator-client/src/proposal_handler.ts b/yarn-project/validator-client/src/proposal_handler.ts index 34106e89a7f5..6a7168eee80c 100644 --- a/yarn-project/validator-client/src/proposal_handler.ts +++ b/yarn-project/validator-client/src/proposal_handler.ts @@ -20,6 +20,7 @@ import { DateProvider, Timer } from '@aztec/foundation/timer'; import type { P2P, PeerId } from '@aztec/p2p'; import { BlockProposalValidator } from '@aztec/p2p/msg_validators'; import type { BlockData, L2Block, L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; +import type { CheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import { getPreviousCheckpointOutHashes, validateCheckpoint } from '@aztec/stdlib/checkpoint'; import { getEpochAtSlot, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import { Gas } from '@aztec/stdlib/gas'; @@ -117,6 +118,7 @@ export class ProposalHandler { private epochCache: EpochCache, private config: ValidatorClientFullConfig, private blobClient: BlobClientInterface, + private reexecutionTracker: CheckpointReexecutionTracker, private metrics?: ValidatorMetrics, private dateProvider: DateProvider = new DateProvider(), telemetry: TelemetryClient = getTelemetryClient(), @@ -794,18 +796,21 @@ export class ProposalHandler { ): Promise { const slot = proposal.slotNumber; - // Timeout block syncing at the start of the next slot + // Block-sync deadline = the moment the proposer can no longer publish this checkpoint to L1. + // With pipelining off that's the end of the proposal's own slot; with pipelining on the + // proposal is built one slot ahead, so the publication deadline is the start of the target + // slot. `getReexecutionDeadline` handles both cases. 
const config = this.checkpointsBuilder.getConfig(); - const nextSlotTimestampSeconds = Number(getTimestampForSlot(SlotNumber(slot + 1), config)); - const timeoutSeconds = Math.max(1, nextSlotTimestampSeconds - Math.floor(this.dateProvider.now() / 1000)); + const deadline = this.getReexecutionDeadline(slot, config); + const timeoutSeconds = Math.max(1, Math.floor((deadline.getTime() - this.dateProvider.now()) / 1000)); // Wait for last block to sync by archive - let lastBlockHeader; + let lastBlockData; try { - lastBlockHeader = await retryUntil( + lastBlockData = await retryUntil( async () => { await this.blockSource.syncImmediate(); - return (await this.blockSource.getBlockData({ archive: proposal.archive }))?.header; + return await this.blockSource.getBlockData({ archive: proposal.archive }); }, `waiting for block with archive ${proposal.archive.toString()} for slot ${slot}`, timeoutSeconds, @@ -820,11 +825,21 @@ export class ProposalHandler { return { isValid: false, reason: 'block_fetch_error' }; } - if (!lastBlockHeader) { + if (!lastBlockData) { this.log.warn(`Last block not found for checkpoint proposal`, proposalInfo); return { isValid: false, reason: 'last_block_not_found' }; } + // Refuse to attest if the block's enclosing checkpoint has already been published to L1. 
+ const existingCheckpoint = await this.blockSource.getCheckpointData({ number: lastBlockData.checkpointNumber }); + if (existingCheckpoint) { + this.log.warn(`Refusing to attest to checkpoint proposal whose checkpoint is already on L1`, { + ...proposalInfo, + checkpointNumber: lastBlockData.checkpointNumber, + }); + return { isValid: false, reason: 'checkpoint_already_published' }; + } + // Get all full blocks for the slot and checkpoint const blocks = await this.blockSource.getBlocksForSlot(slot); if (blocks.length === 0) { @@ -943,6 +958,23 @@ } this.log.verbose(`Checkpoint proposal validation successful for slot ${slot}`, proposalInfo); + + // Maintain re-execution tracker for any observers + + // Drop tracker entries for checkpoints that have reached L1 finality. + try { + const tips = await this.blockSource.getL2Tips(); + const finalizedCheckpointNumber = tips.finalized.checkpoint.number; + if (finalizedCheckpointNumber > 0) { + this.reexecutionTracker.removeBefore(CheckpointNumber(finalizedCheckpointNumber + 1)); + } + } catch (err) { + this.log.error(`Error pruning reexecution tracker`, err, proposalInfo); + } + + // We successfully re-executed every block in this checkpoint locally, record for any observers + this.reexecutionTracker.recordReexecuted(checkpointNumber, proposal.archive); + return { isValid: true, checkpointNumber }; } diff --git a/yarn-project/validator-client/src/validator.ha.integration.test.ts b/yarn-project/validator-client/src/validator.ha.integration.test.ts index 186561fd920b..9146f291f62c 100644 --- a/yarn-project/validator-client/src/validator.ha.integration.test.ts +++ b/yarn-project/validator-client/src/validator.ha.integration.test.ts @@ -17,6 +17,7 @@ import type { P2P, TxProvider } from '@aztec/p2p'; import { BlockProposalValidator } from '@aztec/p2p'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; +import {
InMemoryCheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import type { SlasherConfig, ValidatorClientFullConfig, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import { computeInHashFromL1ToL2Messages } from '@aztec/stdlib/messaging'; import type { L1ToL2MessageSource } from '@aztec/stdlib/messaging'; @@ -226,6 +227,7 @@ describe('ValidatorClient HA Integration', () => { epochCache, config, blobClient, + new InMemoryCheckpointReexecutionTracker(), metrics, dateProvider, getTelemetryClient(), diff --git a/yarn-project/validator-client/src/validator.integration.test.ts b/yarn-project/validator-client/src/validator.integration.test.ts index da2ff3670674..fba7d30f8cf2 100644 --- a/yarn-project/validator-client/src/validator.integration.test.ts +++ b/yarn-project/validator-client/src/validator.integration.test.ts @@ -21,7 +21,7 @@ import { TestTxProvider } from '@aztec/p2p/test-helpers'; import { protocolContractsHash } from '@aztec/protocol-contracts'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { CommitteeAttestation, L2Block } from '@aztec/stdlib/block'; -import { L1PublishedData, PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; +import { InMemoryCheckpointReexecutionTracker, L1PublishedData, PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; import { type L1RollupConstants, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import { Gas, GasFees } from '@aztec/stdlib/gas'; import { tryStop } from '@aztec/stdlib/interfaces/server'; @@ -197,6 +197,7 @@ describe('ValidatorClient Integration', () => { txProvider, keyStoreManager, blobClient, + new InMemoryCheckpointReexecutionTracker(), dateProvider, ); diff --git a/yarn-project/validator-client/src/validator.test.ts b/yarn-project/validator-client/src/validator.test.ts index 6e3bc422f65e..a74a76131007 100644 --- a/yarn-project/validator-client/src/validator.test.ts +++ b/yarn-project/validator-client/src/validator.test.ts @@ -30,6 +30,7 @@ import 
{ import { OffenseType, WANT_TO_CLEAR_SLASH_EVENT, WANT_TO_SLASH_EVENT } from '@aztec/slasher'; import { AztecAddress } from '@aztec/stdlib/aztec-address'; import { type BlockData, BlockHash, L2Block, type L2BlockSink, type L2BlockSource } from '@aztec/stdlib/block'; +import { InMemoryCheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import { type getEpochAtSlot, getTimestampForSlot } from '@aztec/stdlib/epoch-helpers'; import type { SlasherConfig, WorldStateSynchronizer } from '@aztec/stdlib/interfaces/server'; import { type L1ToL2MessageSource, computeInHashFromL1ToL2Messages } from '@aztec/stdlib/messaging'; @@ -209,6 +210,7 @@ describe('ValidatorClient', () => { txProvider, keyStoreManager, blobClient, + new InMemoryCheckpointReexecutionTracker(), dateProvider, )) as ValidatorClient; }); diff --git a/yarn-project/validator-client/src/validator.ts b/yarn-project/validator-client/src/validator.ts index cf8d510edd3a..ad43f19180da 100644 --- a/yarn-project/validator-client/src/validator.ts +++ b/yarn-project/validator-client/src/validator.ts @@ -21,6 +21,7 @@ import { } from '@aztec/slasher'; import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { CommitteeAttestationsAndSigners, L2BlockSink, L2BlockSource } from '@aztec/stdlib/block'; +import type { CheckpointReexecutionTracker } from '@aztec/stdlib/checkpoint'; import { getEpochAtSlot } from '@aztec/stdlib/epoch-helpers'; import type { ITxProvider, @@ -204,6 +205,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) txProvider: ITxProvider, keyStoreManager: KeystoreManager, blobClient: BlobClientInterface, + reexecutionTracker: CheckpointReexecutionTracker, dateProvider: DateProvider = new DateProvider(), telemetry: TelemetryClient = getTelemetryClient(), slashingProtectionDb?: SlashingProtectionDatabase, @@ -228,6 +230,7 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) epochCache, config, blobClient, + 
reexecutionTracker, metrics, dateProvider, telemetry, @@ -528,6 +531,11 @@ export class ValidatorClient extends (EventEmitter as new () => WatcherEmitter) return undefined; } + // Early-out for equivocation: refuses if we've already attested to a higher slot. + if (!this.shouldAttestToSlot(proposalSlotNumber)) { + return undefined; + } + // Ignore proposals from ourselves (may happen in HA setups) if (proposer && this.getValidatorAddresses().some(addr => addr.equals(proposer))) { this.log.debug(`Ignoring block proposal from self for slot ${proposalSlotNumber}`, {