diff --git a/Makefile b/Makefile index 26de039d..d39c9291 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ INSTALL_MODE ?= $(if $(LUMERA_DEFAULT_VERSION),$(LUMERA_DEFAULT_VERSION),latest- install-lumera: @echo "Installing Lumera..." @chmod +x tests/scripts/install-lumera.sh - @sudo LUMERAD_BINARY="$(LUMERAD_BINARY)" tests/scripts/install-lumera.sh $(INSTALL_MODE) + @LUMERAD_BINARY="$(LUMERAD_BINARY)" tests/scripts/install-lumera.sh $(INSTALL_MODE) @echo "PtTDUHythfRfXHh63yzyiGDid4TZj2P76Zd,18749999981413" > ~/claims.csv # Setup supernode environments diff --git a/go.sum b/go.sum index 2f92226d..89f2b862 100644 --- a/go.sum +++ b/go.sum @@ -111,7 +111,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.12.0 h1:BHkPF/vCKyGFKtl2MMxtRpUyzraJ96rWY9FniTbG6cU= +github.com/LumeraProtocol/lumera v1.12.0 h1:ZtGvnwuwOYbbveV21581D6LbMhy9KOVbDAtmck7VAyY= github.com/LumeraProtocol/lumera v1.12.0/go.mod h1:/G9LTPZB+261tHoWoj7q+1fn+O/VV0zzagwLdsThSNo= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= diff --git a/pkg/lumera/chainerrors/chainerrors_test.go b/pkg/lumera/chainerrors/chainerrors_test.go index c1d9ea16..85f944c1 100644 --- a/pkg/lumera/chainerrors/chainerrors_test.go +++ b/pkg/lumera/chainerrors/chainerrors_test.go @@ -14,7 +14,7 @@ import ( // abciErr re-creates an error with typed sentinel preserved across %w wrap, // matching the production wrap 
path in pkg/lumera/modules/tx/impl.go after -// the Wave 0 boundary fix. +// the LEP-6 review fix boundary fix. func abciErr(sentinel *errorsmod.Error, rawLog string) error { return fmt.Errorf("tx failed: code=%d codespace=%s height=0 gas_wanted=0 gas_used=0 raw_log=%s: %w", sentinel.ABCICode(), sentinel.Codespace(), rawLog, sentinel) diff --git a/pkg/lumera/modules/tx/wave1_seq_cap_test.go b/pkg/lumera/modules/tx/sequence_mismatch_cap_test.go similarity index 100% rename from pkg/lumera/modules/tx/wave1_seq_cap_test.go rename to pkg/lumera/modules/tx/sequence_mismatch_cap_test.go diff --git a/pkg/storage/queries/wave1_schema_test.go b/pkg/storage/queries/recheck_schema_migration_test.go similarity index 99% rename from pkg/storage/queries/wave1_schema_test.go rename to pkg/storage/queries/recheck_schema_migration_test.go index ccbcd40b..605e4bdd 100644 --- a/pkg/storage/queries/wave1_schema_test.go +++ b/pkg/storage/queries/recheck_schema_migration_test.go @@ -61,7 +61,7 @@ func TestMigrateStorageRecheckSubmissionsPK(t *testing.T) { defer db.Close() ctx := context.Background() - // Seed the OLD schema (pre-Wave-1 PK). + // Seed the OLD schema (before this fix PK). const oldSchema = ` CREATE TABLE storage_recheck_submissions ( epoch_id INTEGER NOT NULL, diff --git a/pkg/storage/queries/recheck_test.go b/pkg/storage/queries/recheck_test.go index da6e8724..87d76dde 100644 --- a/pkg/storage/queries/recheck_test.go +++ b/pkg/storage/queries/recheck_test.go @@ -13,10 +13,10 @@ import ( "github.com/stretchr/testify/require" ) -// TestRecheckSubmissionDedupPerTarget asserts the Wave 1 / C2 fix: chain +// TestRecheckSubmissionDedupPerTarget asserts the LEP-6 C2 fix: chain // dedup is per-(epoch, ticket, target_account), so two distinct targets // within the same (epoch, ticket) must produce two persisted rows. 
Before -// Wave 1, the PK was (epoch, ticket) and the second target's row was +// LEP-6 review fix, the PK was (epoch, ticket) and the second target's row was // silently dropped — masking that supernode from chain N/R/D math. func TestRecheckSubmissionDedupPerTarget(t *testing.T) { db := sqlx.MustConnect("sqlite3", ":memory:") @@ -63,7 +63,7 @@ func TestRecheckSubmissionDedupPerTarget(t *testing.T) { } // TestRecordPendingRecheckSubmission_DuplicateReturnsTypedError covers the -// Wave 1 / L3 fix: duplicate-pending writes used to be silently swallowed +// LEP-6 L3 fix: duplicate-pending writes used to be silently swallowed // by `INSERT OR IGNORE`; they now return ErrLEP6RecheckAlreadyRecorded so // the attestor can branch on it. func TestRecordPendingRecheckSubmission_DuplicateReturnsTypedError(t *testing.T) { diff --git a/pkg/storage/queries/self_healing_lep6_test.go b/pkg/storage/queries/self_healing_lep6_test.go index 3899b1da..9b0c656d 100644 --- a/pkg/storage/queries/self_healing_lep6_test.go +++ b/pkg/storage/queries/self_healing_lep6_test.go @@ -93,7 +93,7 @@ func TestLEP6HealClaimPendingLifecycle(t *testing.T) { ctx := context.Background() require.NoError(t, store.RecordPendingHealClaim(ctx, 101, "ticket-101", "manifest", "/tmp/stage")) - // Wave 2 / C5 fix: HasHealClaim returns SUBMITTED-only. A pending row + // LEP-6 C5 fix: HasHealClaim returns SUBMITTED-only. A pending row // must not block fresh dispatch. has, err := store.HasHealClaim(ctx, 101) require.NoError(t, err) @@ -124,7 +124,7 @@ func TestLEP6HealVerificationPendingLifecycle(t *testing.T) { ctx := context.Background() require.NoError(t, store.RecordPendingHealVerification(ctx, 202, "verifier-a", true, "hash")) - // Wave 2 / C5 fix: pending must NOT count as submitted. + // LEP-6 C5 fix: pending must NOT count as submitted. 
has, err := store.HasHealVerification(ctx, 202, "verifier-a") require.NoError(t, err) require.False(t, has, "pending row must NOT count as submitted (C5)") diff --git a/pkg/storage/queries/wave3_state_test.go b/pkg/storage/queries/storage_challenge_state_test.go similarity index 100% rename from pkg/storage/queries/wave3_state_test.go rename to pkg/storage/queries/storage_challenge_state_test.go diff --git a/pkg/storagechallenge/deterministic/lep6.go b/pkg/storagechallenge/deterministic/lep6.go index 85f1753c..a0435b25 100644 --- a/pkg/storagechallenge/deterministic/lep6.go +++ b/pkg/storagechallenge/deterministic/lep6.go @@ -494,8 +494,10 @@ func SelectArtifactOrdinal(seed []byte, target, ticketID string, class audittype // passed explicitly so a future param change at the chain level can be // surfaced cleanly. The returned slice has length exactly k. // -// Returns an error if rangeLen >= artifactSize (would yield negative modulus -// space) or if any input is degenerate (k=0, empty class). +// Returns an error if rangeLen > artifactSize (out-of-bounds range), +// artifactSize is zero, or any input is degenerate (k=0, empty class). When +// rangeLen == artifactSize, the whole artifact is challenged and every offset +// is deterministically 0. 
// // IMPORTANT: u32be(ordinal) and u32be(i) are written as raw 4-byte // big-endian integers, not as decimal-string forms — this keeps the byte @@ -505,18 +507,24 @@ func ComputeMultiRangeOffsets(seed []byte, target, ticketID string, class auditt if k <= 0 { return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: k must be > 0") } + if artifactSize == 0 { + return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: artifactSize must be > 0") + } if rangeLen == 0 { return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: rangeLen must be > 0") } - if artifactSize <= rangeLen { - return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: artifactSize (%d) must be > rangeLen (%d)", artifactSize, rangeLen) + if rangeLen > artifactSize { + return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: rangeLen (%d) must be <= artifactSize (%d)", rangeLen, artifactSize) } classDomain := ArtifactClassDomain(class) if classDomain == "" { return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: unsupported class %v", class) } - span := artifactSize - rangeLen offsets := make([]uint64, k) + if rangeLen == artifactSize { + return offsets, nil + } + span := artifactSize - rangeLen var ordBuf, idxBuf [4]byte binary.BigEndian.PutUint32(ordBuf[:], ordinal) for i := 0; i < k; i++ { diff --git a/pkg/storagechallenge/deterministic/lep6_test.go b/pkg/storagechallenge/deterministic/lep6_test.go index 43585ab5..01dd80c0 100644 --- a/pkg/storagechallenge/deterministic/lep6_test.go +++ b/pkg/storagechallenge/deterministic/lep6_test.go @@ -384,6 +384,37 @@ func TestComputeMultiRangeOffsets_AllInBounds(t *testing.T) { } } +func TestComputeMultiRangeOffsets_WholeArtifactWhenRangeEqualsSize(t *testing.T) { + const size = uint64(103) + offsets, err := ComputeMultiRangeOffsets(chainSeed, "sn-target", "ticket-small", + audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, 0, size, size, 4) + if err != nil { + t.Fatalf("err: %v", err) + } + 
if len(offsets) != 4 { + t.Fatalf("expected 4 offsets, got %d", len(offsets)) + } + for i, off := range offsets { + if off != 0 { + t.Fatalf("offset %d = %d, want 0 for whole-artifact challenge", i, off) + } + } +} + +func TestComputeMultiRangeOffsets_RejectsOutOfBoundsRangeLen(t *testing.T) { + cls := audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL + if _, err := ComputeMultiRangeOffsets(chainSeed, "x", "t", cls, 0, 100, 256, 4); err == nil { + t.Fatal("expected error when rangeLen exceeds artifactSize") + } +} + +func TestComputeMultiRangeOffsets_RejectsZeroArtifactSize(t *testing.T) { + cls := audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL + if _, err := ComputeMultiRangeOffsets(chainSeed, "x", "t", cls, 0, 0, 0, 4); err == nil { + t.Fatal("expected error for zero artifactSize") + } +} + func TestComputeMultiRangeOffsets_OffsetsDistinctOnDifferentInputs(t *testing.T) { const size, rl = uint64(10000), uint64(256) a, _ := ComputeMultiRangeOffsets(chainSeed, "sn-target", "ticket-1", diff --git a/pkg/storagechallenge/wave3_index_cache_test.go b/pkg/storagechallenge/index_size_cache_test.go similarity index 100% rename from pkg/storagechallenge/wave3_index_cache_test.go rename to pkg/storagechallenge/index_size_cache_test.go diff --git a/pkg/storagechallenge/lep6_resolution.go b/pkg/storagechallenge/lep6_resolution.go index 604cf552..31aa3391 100644 --- a/pkg/storagechallenge/lep6_resolution.go +++ b/pkg/storagechallenge/lep6_resolution.go @@ -34,26 +34,32 @@ var ErrUnspecifiedArtifactClass = errors.New("storagechallenge: artifact class i // replaces a chain GetTicketArtifactCount RPC that does not exist (LEP-6 v2 // plan §9, Resolved Decision 8). 
// -// Semantics: -// - INDEX -> uint32(meta.RqIdsIc) -// - SYMBOL -> uint32(len(meta.RqIdsIds)) +// Semantics mirror Lumera chain action metadata exactly via +// actiontypes.CascadeArtifactCountsWithFallbackStrict: +// - INDEX -> meta.IndexArtifactCount, falling back to len(meta.RqIdsIds) +// - SYMBOL -> meta.SymbolArtifactCount, falling back to len(meta.RqIdsIds) // - UNSPECIFIED -> error // -// If both counts are zero (legacy / malformed ticket), this returns (0, nil) -// because the chain accepts that case via its TicketArtifactCountState fallback -// path (x/audit/v1/keeper/msg_submit_epoch_report_storage_proofs.go). Callers -// decide whether to skip such a ticket. +// The strict Lumera helper rejects malformed metadata where explicit counts are +// missing and the fallback universe is empty. This keeps supernode proof rows +// aligned with chain validation instead of maintaining duplicate count logic. func ResolveArtifactCount(meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass) (uint32, error) { if meta == nil { return 0, errors.New("storagechallenge: nil cascade metadata") } switch class { - case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX: - return uint32(meta.RqIdsIc), nil - case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: - return uint32(len(meta.RqIdsIds)), nil case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED: return 0, ErrUnspecifiedArtifactClass + case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX, + audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL: + idx, sym, err := actiontypes.CascadeArtifactCountsWithFallbackStrict(meta) + if err != nil { + return 0, fmt.Errorf("storagechallenge: resolve canonical cascade artifact counts: %w", err) + } + if class == audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX { + return idx, nil + } + return sym, nil default: return 0, 
fmt.Errorf("storagechallenge: unknown artifact class %v", class) } @@ -102,17 +108,19 @@ func ResolveArtifactKey(meta *actiontypes.CascadeMetadata, class audittypes.Stor } } -// ResolveArtifactSize returns the exact byte size used to derive LEP-6 -// multi-range offsets for a selected artifact. -// -// SYMBOL artifacts are RaptorQ symbols. The exact symbol size is derived from -// the finalized Action.FileSizeKbs and meta.RqIdsMax: +// ResolveArtifactSize returns the deterministic metadata-derived byte-size +// fallback used to derive LEP-6 multi-range offsets for a selected artifact. // -// symbolSize = ceil(fileSizeKbs*1024 / meta.RqIdsMax) +// Production dispatch should prefer the stored blob size returned by the same +// storage layer used by the recipient ArtifactReader. SYMBOL artifacts are +// compressed/content-addressed RaptorQ blobs, and their served byte length can +// differ from the logical ceil(fileSizeKbs*1024 / meta.RqIdsMax) estimate. +// This fallback remains useful for tests and for deployments where the local +// store cannot report the selected artifact size. // // INDEX artifacts are generated deterministically from meta.Signatures, -// meta.RqIdsIc, and meta.RqIdsMax; their exact compressed byte length is the -// length of the selected generated index file. +// meta.RqIdsIc, and meta.RqIdsMax; their fallback byte length is the length of +// the selected generated index file. 
func ResolveArtifactSize(act *actiontypes.Action, meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass, ordinal uint32) (uint64, error) { if act == nil { return 0, errors.New("storagechallenge: nil action") diff --git a/pkg/storagechallenge/lep6_resolution_test.go b/pkg/storagechallenge/lep6_resolution_test.go index 29d77908..811e8cd3 100644 --- a/pkg/storagechallenge/lep6_resolution_test.go +++ b/pkg/storagechallenge/lep6_resolution_test.go @@ -16,25 +16,27 @@ func TestMaxStorageProofResultsPerReportTracksChainConstant(t *testing.T) { func TestResolveArtifactCount_Index_Symbol_Unspecified(t *testing.T) { meta := &actiontypes.CascadeMetadata{ - RqIdsIc: 7, - RqIdsMax: 12, - RqIdsIds: []string{"a", "b", "c", "d"}, + RqIdsIc: 7, + RqIdsMax: 12, + RqIdsIds: []string{"a", "b", "c", "d"}, + IndexArtifactCount: 50, + SymbolArtifactCount: 60, } gotIdx, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX) if err != nil { t.Fatalf("INDEX: unexpected error: %v", err) } - if gotIdx != 7 { - t.Fatalf("INDEX count: want 7, got %d", gotIdx) + if gotIdx != 50 { + t.Fatalf("INDEX count: want chain-canonical 50, got %d", gotIdx) } gotSym, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL) if err != nil { t.Fatalf("SYMBOL: unexpected error: %v", err) } - if gotSym != 4 { - t.Fatalf("SYMBOL count: want 4, got %d", gotSym) + if gotSym != 60 { + t.Fatalf("SYMBOL count: want chain-canonical 60, got %d", gotSym) } if _, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED); err == nil { @@ -42,6 +44,21 @@ func TestResolveArtifactCount_Index_Symbol_Unspecified(t *testing.T) { } } +func TestResolveArtifactCount_UsesLumeraCanonicalFallback(t *testing.T) { + meta := &actiontypes.CascadeMetadata{ + RqIdsIc: 11, + RqIdsIds: make([]string, 50), + } + + gotIdx, err := ResolveArtifactCount(meta, 
audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX) + if err != nil { + t.Fatalf("INDEX: unexpected error: %v", err) + } + if gotIdx != 50 { + t.Fatalf("INDEX count must mirror Lumera CascadeArtifactCountsWithFallbackStrict fallback: want 50, got %d", gotIdx) + } +} + func TestResolveArtifactCount_LegacyZero(t *testing.T) { meta := &actiontypes.CascadeMetadata{} // both INDEX (RqIdsIc) and SYMBOL (len(RqIdsIds)) are zero for _, class := range []audittypes.StorageProofArtifactClass{ @@ -49,11 +66,11 @@ func TestResolveArtifactCount_LegacyZero(t *testing.T) { audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, } { got, err := ResolveArtifactCount(meta, class) - if err != nil { - t.Fatalf("class=%v: legacy zero should not error, got: %v", class, err) + if err == nil { + t.Fatalf("class=%v: malformed empty metadata should error", class) } if got != 0 { - t.Fatalf("class=%v: want 0, got %d", class, got) + t.Fatalf("class=%v: errored metadata should return 0, got %d", class, got) } } } diff --git a/supernode/cascade/reseed_wave3_test.go b/supernode/cascade/reseed_stream_copy_test.go similarity index 100% rename from supernode/cascade/reseed_wave3_test.go rename to supernode/cascade/reseed_stream_copy_test.go diff --git a/supernode/cascade/wave2_streamcopy_test.go b/supernode/cascade/stream_copy_file_test.go similarity index 100% rename from supernode/cascade/wave2_streamcopy_test.go rename to supernode/cascade/stream_copy_file_test.go diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 799e9267..971986f8 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -229,8 +229,9 @@ The supernode will connect to the Lumera network and begin participating in the hostReporter.SetProofResultProvider(resultBuffer) } + artifactReader := storageChallengeService.NewP2PArtifactReader(p2pService) storageChallengeServer := storageChallengeRPC.NewServer(appConfig.SupernodeConfig.Identity, p2pService, historyStore). 
- WithArtifactReader(newP2PArtifactReader(p2pService)). + WithArtifactReader(artifactReader). WithRecipientSigner(kr, appConfig.SupernodeConfig.KeyName). WithAuditParams(lumeraClient.Audit()) var storageChallengeRunner *storageChallengeService.Service @@ -263,12 +264,13 @@ The supernode will connect to the Lumera network and begin participating in the appConfig.SupernodeConfig.Identity, storageChallengeService.NewSecureSupernodeClientFactory(lumeraClient, kr, appConfig.SupernodeConfig.Identity, appConfig.SupernodeConfig.Port), storageChallengeService.NewChainTicketProvider(lumeraClient), - newCascadeMetaProvider(lumeraClient), + storageChallengeService.NewCascadeMetaProvider(lumeraClient), resultBuffer, ) if derr != nil { logtrace.Fatal(ctx, "Failed to initialize LEP-6 dispatcher", logtrace.Fields{"error": derr.Error()}) } + dispatcher.SetArtifactSizeProvider(artifactReader) storageChallengeRunner.SetLEP6Dispatcher(dispatcher) if appConfig.StorageChallengeConfig.LEP6.Recheck.Enabled { diff --git a/supernode/config/wave4_regression_test.go b/supernode/config/lep6_config_regression_test.go similarity index 96% rename from supernode/config/wave4_regression_test.go rename to supernode/config/lep6_config_regression_test.go index 8fe5255b..955c4c54 100644 --- a/supernode/config/wave4_regression_test.go +++ b/supernode/config/lep6_config_regression_test.go @@ -7,7 +7,7 @@ import ( "testing" ) -// Wave 4 — LEP-6 PR286 review fix regression tests. +// LEP-6 review regression: LEP-6 PR286 review fix regression tests. // // Coverage: // - C1: missing-block default for LEP-6 toggles is FALSE (no silent @@ -16,7 +16,7 @@ import ( // cases (wrong-direction default would cause auto-opt-in) and the // advisory helper. // - L6: structural validator rejects recheck=true with disabled parents. 
-// Pre-Wave-4, fixtures could carry recheck.enabled=true while +// Before this fix, fixtures could carry recheck.enabled=true while // storage_challenge.enabled=false, silently no-op'd at runtime. func TestLoadConfig_C1_MissingBlocksDefaultDisabled(t *testing.T) { diff --git a/supernode/config/wave4_staging_test.go b/supernode/config/staging_dir_config_test.go similarity index 100% rename from supernode/config/wave4_staging_test.go rename to supernode/config/staging_dir_config_test.go diff --git a/supernode/host_reporter/wave4_probetcp_test.go b/supernode/host_reporter/probe_tcp_state_test.go similarity index 94% rename from supernode/host_reporter/wave4_probetcp_test.go rename to supernode/host_reporter/probe_tcp_state_test.go index ab2494e9..94c56ca9 100644 --- a/supernode/host_reporter/wave4_probetcp_test.go +++ b/supernode/host_reporter/probe_tcp_state_test.go @@ -10,7 +10,7 @@ import ( audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" ) -// Wave 4 — LEP-6 review M6 (Matee, 2026-05-06). probeTCP must distinguish +// LEP-6 review regression: LEP-6 review M6 (Matee, 2026-05-06). probeTCP must distinguish // canonical CLOSED (ECONNREFUSED) from operator-side faults (DNS, host // unreach, ctx errors, timeouts) which now report UNKNOWN. @@ -64,7 +64,7 @@ func TestProbeTCP_M6_RefusedReturnsClosed(t *testing.T) { } } -// TestProbeTCP_M6_DNSFailureReturnsUnknown — pre-Wave-4 a DNS resolution +// TestProbeTCP_M6_DNSFailureReturnsUnknown — before this fix a DNS resolution // failure mapped to CLOSED, falsely accusing the peer's port of being shut. // Now must map to UNKNOWN. 
func TestProbeTCP_M6_DNSFailureReturnsUnknown(t *testing.T) { diff --git a/supernode/host_reporter/service.go b/supernode/host_reporter/service.go index 73c96353..d8390046 100644 --- a/supernode/host_reporter/service.go +++ b/supernode/host_reporter/service.go @@ -186,7 +186,16 @@ func (s *Service) tick(ctx context.Context) { var storageProofResults []*audittypes.StorageProofResult if proofResultProvider := s.getProofResultProvider(); proofResultProvider != nil { storageProofResults = proofResultProvider.CollectResults(epochID) - if s.fullModeStorageProofCoverageRequired(tickCtx) { + mode, modeOK := s.storageTruthEnforcementMode(tickCtx) + if modeOK && mode != audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED && len(assignResp.TargetSupernodeAccounts) > 0 && len(storageProofResults) == 0 { + logtrace.Warn(tickCtx, "epoch report skipped: waiting for LEP-6 storage proof results", logtrace.Fields{ + "epoch_id": epochID, + "assigned_targets": len(assignResp.TargetSupernodeAccounts), + "mode": mode.String(), + }) + return + } + if modeOK && mode == audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL { complete, reason := storageProofCoverageComplete(storageProofResults, assignResp.TargetSupernodeAccounts) if !complete { if requeuer, ok := proofResultProvider.(ProofResultRequeuer); ok { @@ -231,12 +240,12 @@ func (s *Service) tick(ctx context.Context) { }) } -func (s *Service) fullModeStorageProofCoverageRequired(ctx context.Context) bool { +func (s *Service) storageTruthEnforcementMode(ctx context.Context) (audittypes.StorageTruthEnforcementMode, bool) { paramsResp, err := s.lumera.Audit().GetParams(ctx) if err != nil || paramsResp == nil { - return false + return audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED, false } - return paramsResp.Params.StorageTruthEnforcementMode == audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL + return 
paramsResp.Params.StorageTruthEnforcementMode, true } func storageProofCoverageComplete(results []*audittypes.StorageProofResult, targets []string) (bool, string) { diff --git a/supernode/host_reporter/tick_behavior_test.go b/supernode/host_reporter/tick_behavior_test.go index b2e02937..309238de 100644 --- a/supernode/host_reporter/tick_behavior_test.go +++ b/supernode/host_reporter/tick_behavior_test.go @@ -321,6 +321,66 @@ func TestTick_AttachedProofResultProviderIsDrainedAndForwarded(t *testing.T) { } } +func TestTick_SOFTModeWithAssignedTargetsWaitsForLEP6ProofResults(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + kr, keyName, identity := testKeyringAndIdentity(t) + auditMod := &stubAuditModule{ + currentEpoch: &audittypes.QueryCurrentEpochResponse{EpochId: 13}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: audittypes.EpochAnchor{EpochId: 13}}, + epochReportErr: status.Error(codes.NotFound, "not found"), + assigned: &audittypes.QueryAssignedTargetsResponse{ + TargetSupernodeAccounts: []string{"snA"}, + }, + params: audittypes.Params{StorageTruthEnforcementMode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SOFT}, + } + auditMsg := auditmsgmod.NewMockModule(ctrl) + node := nodemod.NewMockModule(ctrl) + sn := supernodemod.NewMockModule(ctrl) + client := lumeraMock.NewMockClient(ctrl) + client.EXPECT().Audit().AnyTimes().Return(auditMod) + client.EXPECT().AuditMsg().AnyTimes().Return(auditMsg) + client.EXPECT().SuperNode().AnyTimes().Return(sn) + client.EXPECT().Node().AnyTimes().Return(node) + sn.EXPECT().GetSupernodeWithLatestAddress(gomock.Any(), "snA").AnyTimes().Return(&supernodemod.SuperNodeInfo{LatestAddress: "127.0.0.1:4444"}, nil) + + provider := &stubProofResultProvider{} + auditMsg.EXPECT().SubmitEpochReport(gomock.Any(), uint64(13), gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, _ uint64, _ audittypes.HostReport, _ []*audittypes.StorageChallengeObservation, 
proofs []*audittypes.StorageProofResult) (*sdktx.BroadcastTxResponse, error) { + if len(proofs) != 1 || proofs[0].TicketId != "ticket-13" { + t.Fatalf("expected delayed proof result to be submitted, got %+v", proofs) + } + return &sdktx.BroadcastTxResponse{}, nil + }, + ).Times(1) + + svc, err := NewService(identity, client, kr, keyName, "", "") + if err != nil { + t.Fatalf("new service: %v", err) + } + svc.SetProofResultProvider(provider) + + // First tick models the production race: host_reporter fires before the + // LEP-6 dispatcher appends same-epoch proof results. It must not submit an + // empty storage_proof_results report because that report is idempotent and + // would permanently block the later proof rows for this epoch. + svc.tick(context.Background()) + + provider.results = []*audittypes.StorageProofResult{{ + TargetSupernodeAccount: "snA", + BucketType: audittypes.StorageProofBucketType_STORAGE_PROOF_BUCKET_TYPE_RECENT, + TicketId: "ticket-13", + TranscriptHash: "hash-13", + ResultClass: audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS, + }} + svc.tick(context.Background()) + + if len(provider.queriedEpochs) != 2 || provider.queriedEpochs[0] != 13 || provider.queriedEpochs[1] != 13 { + t.Fatalf("expected provider queried twice for epoch 13, got %v", provider.queriedEpochs) + } +} + func TestTick_FULLModeIncompleteStorageProofCoverageSkipsSubmitAndRequeues(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/supernode/recheck/wave1_regression_test.go b/supernode/recheck/recheck_regression_test.go similarity index 96% rename from supernode/recheck/wave1_regression_test.go rename to supernode/recheck/recheck_regression_test.go index fad32565..cd90a04a 100644 --- a/supernode/recheck/wave1_regression_test.go +++ b/supernode/recheck/recheck_regression_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" ) -// TestAttestor_MultiTargetSameTicketBothPersist is the Wave 1 / C2 +// 
TestAttestor_MultiTargetSameTicketBothPersist is the LEP-6 C2 // regression test Matee called out: two distinct targets within the same // (epoch, ticket) must each produce a persisted dedup row and a chain // submit. The previous PK collapsed both into one row and dropped the @@ -83,7 +83,7 @@ func (staticTwoReporters) ReporterAccounts(_ context.Context) ([]string, error) return []string{"reporter-bad", "reporter-good"}, nil } -// TestFinder_PerReporterErrorIsolation is the Wave 1 / L4 regression: a +// TestFinder_PerReporterErrorIsolation is the LEP-6 L4 regression: a // single failing reporter RPC must NOT mask candidates from other // reporters. func TestFinder_PerReporterErrorIsolation(t *testing.T) { diff --git a/supernode/self_healing/wave2_constants_test.go b/supernode/self_healing/lep6_constants_test.go similarity index 100% rename from supernode/self_healing/wave2_constants_test.go rename to supernode/self_healing/lep6_constants_test.go diff --git a/supernode/self_healing/mocks_test.go b/supernode/self_healing/mocks_test.go index 2ecaa488..26521160 100644 --- a/supernode/self_healing/mocks_test.go +++ b/supernode/self_healing/mocks_test.go @@ -72,7 +72,7 @@ func (p *programmableAudit) GetHealOp(ctx context.Context, healOpID uint64) (*au // surfaces as gRPC status.NotFound with the discriminating // "heal op not found" message. The previous test used a bare // errors.New("not found") which the broad legacy substring - // matcher accepted, but which Wave 0 narrowly rejects (so as not + // matcher accepted, but which LEP-6 review fix narrowly rejects (so as not // to swallow transient "block N not found" errors). 
return nil, status.Error(codes.NotFound, "heal op not found") } diff --git a/supernode/self_healing/wave3_regression_test.go b/supernode/self_healing/self_healing_regression_test.go similarity index 100% rename from supernode/self_healing/wave3_regression_test.go rename to supernode/self_healing/self_healing_regression_test.go diff --git a/supernode/self_healing/wave2_regression_test.go b/supernode/self_healing/self_healing_resume_regression_test.go similarity index 100% rename from supernode/self_healing/wave2_regression_test.go rename to supernode/self_healing/self_healing_resume_regression_test.go diff --git a/supernode/storage_challenge/wave3_ticket_provider_test.go b/supernode/storage_challenge/chain_ticket_provider_test.go similarity index 93% rename from supernode/storage_challenge/wave3_ticket_provider_test.go rename to supernode/storage_challenge/chain_ticket_provider_test.go index 6110ca67..64bcfcad 100644 --- a/supernode/storage_challenge/wave3_ticket_provider_test.go +++ b/supernode/storage_challenge/chain_ticket_provider_test.go @@ -11,9 +11,9 @@ import ( "go.uber.org/mock/gomock" ) -// Wave 3 — M10 regression. Pre-Wave-3 the eligibility filter required BOTH +// LEP-6 M10 regression: the previous eligibility filter required BOTH // IndexArtifactCount AND SymbolArtifactCount > 0, silently hiding INDEX-only -// or SYMBOL-only tickets from the dispatcher. Post-Wave-3 a ticket is +// or SYMBOL-only tickets from the dispatcher. After the fix a ticket is // eligible if AT LEAST ONE class is non-zero. Both-zero remains invisible. 
func TestChainTicketProvider_M10_AcceptsAtLeastOneClass(t *testing.T) { cases := []struct { diff --git a/supernode/cmd/lep6_adapters.go b/supernode/storage_challenge/lep6_adapters.go similarity index 61% rename from supernode/cmd/lep6_adapters.go rename to supernode/storage_challenge/lep6_adapters.go index 19e8984b..11050c15 100644 --- a/supernode/cmd/lep6_adapters.go +++ b/supernode/storage_challenge/lep6_adapters.go @@ -1,4 +1,4 @@ -package cmd +package storage_challenge import ( "context" @@ -9,6 +9,8 @@ import ( "github.com/LumeraProtocol/supernode/v2/p2p" "github.com/LumeraProtocol/supernode/v2/pkg/cascadekit" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/btcsuite/btcutil/base58" ) // p2pArtifactReader is the recipient-side adapter that satisfies the @@ -16,24 +18,33 @@ import ( // the full artifact bytes from the local p2p store and slicing the // requested range. The PR3 path is correct-but-not-optimal: a future // iteration can replace this with a range-scoped reader. +type artifactRetriever interface { + Retrieve(ctx context.Context, key string, localOnly ...bool) ([]byte, error) +} + type p2pArtifactReader struct { - p2p p2p.P2P + p2p artifactRetriever } -func newP2PArtifactReader(p p2p.P2P) *p2pArtifactReader { +func NewP2PArtifactReader(p p2p.P2P) *p2pArtifactReader { return &p2pArtifactReader{p2p: p} } // ReadArtifactRange returns bytes [start, end) for the given key. class is // currently informational; storage is content-addressed by key alone. 
-func (r *p2pArtifactReader) ReadArtifactRange(ctx context.Context, _ audittypes.StorageProofArtifactClass, key string, start, end uint64) ([]byte, error) { - if r == nil || r.p2p == nil { - return nil, fmt.Errorf("p2pArtifactReader: nil p2p service") +func (r *p2pArtifactReader) ArtifactSize(ctx context.Context, _ audittypes.StorageProofArtifactClass, key string) (uint64, error) { + data, err := r.retrieveVerifiedArtifact(ctx, key) + if err != nil { + return 0, err } + return uint64(len(data)), nil +} + +func (r *p2pArtifactReader) ReadArtifactRange(ctx context.Context, _ audittypes.StorageProofArtifactClass, key string, start, end uint64) ([]byte, error) { if end <= start { return nil, fmt.Errorf("p2pArtifactReader: invalid range [%d,%d)", start, end) } - data, err := r.p2p.Retrieve(ctx, key, true) + data, err := r.retrieveVerifiedArtifact(ctx, key) if err != nil { return nil, err } @@ -45,15 +56,36 @@ func (r *p2pArtifactReader) ReadArtifactRange(ctx context.Context, _ audittypes. return out, nil } -// cascadeMetaProvider implements storage_challenge.CascadeMetaProvider via -// the lumera Action module. It fetches the on-chain action, decodes its -// CascadeMetadata, and returns it alongside the finalized action FileSizeKbs -// needed for exact artifact-size derivation. 
+func (r *p2pArtifactReader) retrieveVerifiedArtifact(ctx context.Context, key string) ([]byte, error) { + if r == nil || r.p2p == nil { + return nil, fmt.Errorf("p2pArtifactReader: nil p2p service") + } + data, err := r.p2p.Retrieve(ctx, key, true) + if err != nil { + return nil, err + } + if len(data) == 0 { + return nil, fmt.Errorf("p2pArtifactReader: empty artifact for key %s", key) + } + hash, err := utils.Blake3Hash(data) + if err != nil { + return nil, fmt.Errorf("p2pArtifactReader: hash artifact %s: %w", key, err) + } + if got := base58.Encode(hash); got != key { + return nil, fmt.Errorf("p2pArtifactReader: content hash mismatch for key %s", key) + } + return data, nil +} + +// cascadeMetaProvider implements CascadeMetaProvider via the lumera Action +// module. It fetches the on-chain action, decodes its CascadeMetadata, and +// returns it alongside the finalized action FileSizeKbs needed for exact +// artifact-size derivation. type cascadeMetaProvider struct { client lumera.Client } -func newCascadeMetaProvider(c lumera.Client) *cascadeMetaProvider { +func NewCascadeMetaProvider(c lumera.Client) *cascadeMetaProvider { return &cascadeMetaProvider{client: c} } diff --git a/supernode/storage_challenge/lep6_adapters_test.go b/supernode/storage_challenge/lep6_adapters_test.go new file mode 100644 index 00000000..3ee90a6b --- /dev/null +++ b/supernode/storage_challenge/lep6_adapters_test.go @@ -0,0 +1,69 @@ +package storage_challenge + +import ( + "context" + "testing" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/utils" + "github.com/btcsuite/btcutil/base58" + "github.com/stretchr/testify/require" +) + +type fakeArtifactRetriever struct { + data []byte + lastKey string + lastLocal []bool +} + +func (f *fakeArtifactRetriever) Retrieve(_ context.Context, key string, localOnly ...bool) ([]byte, error) { + f.lastKey = key + f.lastLocal = append([]bool(nil), localOnly...) 
+ return append([]byte(nil), f.data...), nil +} + +func artifactKeyForTest(t *testing.T, data []byte) string { + t.Helper() + hash, err := utils.Blake3Hash(data) + require.NoError(t, err) + return base58.Encode(hash) +} + +func TestP2PArtifactReaderReadRangeRejectsContentHashMismatch(t *testing.T) { + t.Parallel() + + stored := []byte("corrupted-symbol-bytes") + original := []byte("original-symbol-bytes") + key := artifactKeyForTest(t, original) + reader := &p2pArtifactReader{p2p: &fakeArtifactRetriever{data: stored}} + + _, err := reader.ReadArtifactRange(context.Background(), audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, key, 0, 4) + require.Error(t, err) + require.Contains(t, err.Error(), "content hash mismatch") +} + +func TestP2PArtifactReaderReadRangeReturnsVerifiedBytes(t *testing.T) { + t.Parallel() + + stored := []byte("verified-symbol-bytes") + key := artifactKeyForTest(t, stored) + fake := &fakeArtifactRetriever{data: stored} + reader := &p2pArtifactReader{p2p: fake} + + got, err := reader.ReadArtifactRange(context.Background(), audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, key, 3, 9) + require.NoError(t, err) + require.Equal(t, stored[3:9], got) + require.Equal(t, key, fake.lastKey) + require.Equal(t, []bool{true}, fake.lastLocal) +} + +func TestP2PArtifactReaderArtifactSizeRejectsContentHashMismatch(t *testing.T) { + t.Parallel() + + key := artifactKeyForTest(t, []byte("original-symbol-bytes")) + reader := &p2pArtifactReader{p2p: &fakeArtifactRetriever{data: []byte("corrupted-symbol-bytes")}} + + _, err := reader.ArtifactSize(context.Background(), audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, key) + require.Error(t, err) + require.Contains(t, err.Error(), "content hash mismatch") +} diff --git a/supernode/storage_challenge/lep6_dispatch.go b/supernode/storage_challenge/lep6_dispatch.go index 328e6872..21bfac6d 100644 --- a/supernode/storage_challenge/lep6_dispatch.go 
+++ b/supernode/storage_challenge/lep6_dispatch.go @@ -74,6 +74,14 @@ type CascadeMetaProvider interface { GetCascadeMetadata(ctx context.Context, ticketID string) (*actiontypes.CascadeMetadata, uint64, error) } +// ArtifactSizeProvider optionally returns the byte size of the exact artifact +// blob served by the recipient-side storage/proof reader. Runtime LEP-6 range +// selection must operate on those bytes; metadata-derived sizes are only a +// deterministic fallback when the local store cannot report a size. +type ArtifactSizeProvider interface { + ArtifactSize(ctx context.Context, class audittypes.StorageProofArtifactClass, key string) (uint64, error) +} + // TicketProvider enumerates the cascade tickets that the given target // supernode is a participant on. Returns the action_id and the action's // register-time block height (for ClassifyTicketBucket). @@ -108,6 +116,7 @@ type LEP6Dispatcher struct { supernodeClient SupernodeClientFactory tickets TicketProvider meta CascadeMetaProvider + sizes ArtifactSizeProvider buffer *Buffer mu sync.Mutex } @@ -160,6 +169,18 @@ func NewLEP6Dispatcher( }, nil } +// SetArtifactSizeProvider attaches an optional runtime artifact-size source. +// Production wires this to the same local P2P store used by the recipient +// ArtifactReader so challenger range derivation matches the bytes actually +// served by storage. Tests and non-runtime callers may leave it nil, in which +// case ResolveArtifactSize remains the deterministic fallback. +func (d *LEP6Dispatcher) SetArtifactSizeProvider(p ArtifactSizeProvider) { + if d == nil { + return + } + d.sizes = p +} + // DispatchEpoch runs the challenger flow for epochID. 
The flow gates on // StorageTruthEnforcementMode: UNSPECIFIED skips dispatch entirely; // SHADOW/SOFT/FULL all execute the same off-chain path (chain enforces @@ -461,11 +482,43 @@ func (d *LEP6Dispatcher) dispatchTicket( lep6metrics.IncDispatchInternalFailure("resolve_size") return fmt.Errorf("resolve artifact size: %w", err) } + // Prefer the stored blob size when available. SYMBOL artifacts in runtime + // are compressed/content-addressed RaptorQ blobs, so FileSizeKbs/RqIdsMax + // is only a logical fallback estimate and can exceed the bytes that the + // target ArtifactReader will serve. Range derivation must use the served + // blob size to avoid healthy artifacts becoming INVALID_TRANSCRIPT. + if d.sizes != nil { + storedSize, sizeErr := d.sizes.ArtifactSize(ctx, class, artifactKey) + if sizeErr != nil { + logtrace.Warn(ctx, "lep6 dispatch: stored artifact size unavailable; using metadata fallback", logtrace.Fields{ + "ticket": ticketID, "artifact_class": class.String(), "artifact_key": artifactKey, "fallback_size": artifactSize, "error": sizeErr.Error(), + }) + } else if storedSize == 0 { + logtrace.Warn(ctx, "lep6 dispatch: stored artifact size is zero; using metadata fallback", logtrace.Fields{ + "ticket": ticketID, "artifact_class": class.String(), "artifact_key": artifactKey, "fallback_size": artifactSize, + }) + } else { + if storedSize != artifactSize { + logtrace.Debug(ctx, "lep6 dispatch: using stored artifact size instead of metadata fallback", logtrace.Fields{ + "ticket": ticketID, "artifact_class": class.String(), "artifact_key": artifactKey, "metadata_size": artifactSize, "stored_size": storedSize, + }) + } + artifactSize = storedSize + } + } rangeLen := uint64(params.StorageTruthCompoundRangeLenBytes) if rangeLen == 0 { rangeLen = uint64(deterministic.LEP6CompoundRangeLenBytes) } + // Small artifacts are valid CASCADE artifacts and must still produce a + // chain-acceptable proof row. 
Use a per-artifact effective range length so
+	// the requested byte ranges stay in bounds while preserving the chain param
+	// as the cap for normal-sized artifacts. The derivation hash below uses the
+	// same effective value, and the recipient signs the same request shape.
+	if artifactSize > 0 && artifactSize < rangeLen {
+		rangeLen = artifactSize
+	}
 	k := int(params.StorageTruthCompoundRangesPerArtifact)
 	if k == 0 {
 		k = deterministic.LEP6CompoundRangesPerArtifact
diff --git a/supernode/storage_challenge/wave3_regression_test.go b/supernode/storage_challenge/lep6_dispatch_regression_test.go
similarity index 90%
rename from supernode/storage_challenge/wave3_regression_test.go
rename to supernode/storage_challenge/lep6_dispatch_regression_test.go
index e50a1ee9..65b9da45 100644
--- a/supernode/storage_challenge/wave3_regression_test.go
+++ b/supernode/storage_challenge/lep6_dispatch_regression_test.go
@@ -10,7 +10,7 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-// Wave 3 — LEP-6 PR286 review fix regression tests.
+// LEP-6 PR286 review fix regression tests.
 //
 // Coverage:
 //  - H6: SelectArtifactClass with empty rolled class emits NO_ELIGIBLE_TICKET
@@ -22,12 +22,12 @@ import (
 //    must NOT leak into the chain row's TicketId field (chain rejects).
 //
 // H4/H5 invariants are covered by lep6_dispatch_test.go +
-// result_buffer_test.go after the wave-3 rewrites; this file targets the
+// result_buffer_test.go after the LEP-6 dispatcher rewrites; this file targets the
 // behavioural regressions specific to H6/L5 that did not have a direct test
-// before this wave.
+// before this change.
 
// TestDispatchEpoch_H6_NoSwapEmitsNoEligible_TicketIdEmpty exercises the -// post-Wave-3 SelectArtifactClass behavior: with `tkt-T0` (rolls SYMBOL when +// fixed SelectArtifactClass behavior: with `tkt-T0` (rolls SYMBOL when // both classes are present) and indexCount=0, the function must return // UNSPECIFIED — wait, that's only the indexCount=0 + INDEX-roll case. For // SYMBOL-roll + indexCount=0 we still return SYMBOL and the dispatcher hits @@ -42,19 +42,21 @@ func TestDispatchEpoch_H6_RollEmptyEmitsNoEligibleNotSwap(t *testing.T) { assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{"sn-target"}}, } // `tkt-timeout` rolls INDEX under makeAnchor's seed (verified empirically; - // see find_symbol_roll.go probe). With indexCount=0 the post-Wave-3 - // behaviour MUST be UNSPECIFIED → NO_ELIGIBLE_TICKET. Pre-Wave-3 code + // see find_symbol_roll.go probe). With indexCount=0 the fixed + // behaviour MUST be UNSPECIFIED → NO_ELIGIBLE_TICKET. The previous selection code // would have swapped to SYMBOL and tried to dispatch. tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ "sn-target": {{TicketID: "tkt-timeout", AnchorBlock: 100}}, }} - // IndexArtifactCount derives from RqIdsIc, SymbolArtifactCount from len(RqIdsIds). - // RqIdsIc=0 → INDEX class empty. + // Under chain-canonical count resolution, len(RqIdsIds) is the fallback for + // both classes. Keep the fallback universe empty here so the INDEX roll has + // no chain-valid artifact universe and must emit NO_ELIGIBLE rather than + // swapping classes. 
meta := stubMetaProvider{ meta: &actiontypes.CascadeMetadata{ RqIdsIc: 0, RqIdsMax: 1, - RqIdsIds: []string{"sym-0"}, // SYMBOL count = 1 + RqIdsIds: []string{}, }, size: 4 * 1024, } diff --git a/supernode/storage_challenge/lep6_dispatch_test.go b/supernode/storage_challenge/lep6_dispatch_test.go index 6c967dd7..41e84b78 100644 --- a/supernode/storage_challenge/lep6_dispatch_test.go +++ b/supernode/storage_challenge/lep6_dispatch_test.go @@ -110,13 +110,30 @@ func (s stubMetaProvider) GetCascadeMetadata(_ context.Context, _ string) (*acti return s.meta, s.size, nil } +// stubArtifactSizeProvider returns stored artifact sizes keyed by artifact key. +type stubArtifactSizeProvider struct { + sizes map[string]uint64 + err error +} + +func (s stubArtifactSizeProvider) ArtifactSize(_ context.Context, _ audittypes.StorageProofArtifactClass, key string) (uint64, error) { + if s.err != nil { + return 0, s.err + } + return s.sizes[key], nil +} + // stubCompoundClient implements SupernodeCompoundClient. 
type stubCompoundClient struct { - resp *supernodepb.GetCompoundProofResponse - err error + resp *supernodepb.GetCompoundProofResponse + err error + requests []*supernodepb.GetCompoundProofRequest } -func (s *stubCompoundClient) GetCompoundProof(_ context.Context, _ *supernodepb.GetCompoundProofRequest) (*supernodepb.GetCompoundProofResponse, error) { +func (s *stubCompoundClient) GetCompoundProof(_ context.Context, req *supernodepb.GetCompoundProofRequest) (*supernodepb.GetCompoundProofResponse, error) { + if req != nil { + s.requests = append(s.requests, req) + } return s.resp, s.err } func (s *stubCompoundClient) Close() error { return nil } @@ -496,6 +513,160 @@ func TestDispatchEpoch_HappyPath_EmitsPassResult(t *testing.T) { require.True(t, sawPass, "expected a PASS-class result on happy path") } +func TestDispatchEpoch_SmallArtifactUsesWholeArtifactRange(t *testing.T) { + const epochID uint64 = 20 + const target = "sn-target" + anchor := makeAnchor(epochID, 200, target) + params := defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL) + params.StorageTruthCompoundRangeLenBytes = uint32(deterministic.LEP6CompoundRangeLenBytes) + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: params}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{target}}, + } + + // Choose a ticket whose deterministic class roll selects SYMBOL so the + // artifact size is derived from FileSizeKbs/RqIdsMax. With FileSizeKbs=1 and + // RqIdsMax=10 the selected symbol is 103 bytes, below the 256-byte chain cap. 
+ var ticketID string + for i := 0; i < 100; i++ { + candidate := "tkt-small-symbol" + if i > 0 { + candidate = candidate + string(rune('a'+i)) + } + if deterministic.SelectArtifactClass(anchor.Seed, target, candidate, 1, 1) == audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL { + ticketID = candidate + break + } + } + require.NotEmpty(t, ticketID) + + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + target: {{TicketID: ticketID, AnchorBlock: 100}}, + }} + meta := stubMetaProvider{ + meta: &actiontypes.CascadeMetadata{RqIdsIc: 0, RqIdsMax: 10, RqIdsIds: []string{"sym-0"}}, + size: 1, + } + + const smallArtifactSize = 103 + rangeBytes := make([][]byte, deterministic.LEP6CompoundRangesPerArtifact) + hasher := blake3.New(32, nil) + for i := range rangeBytes { + buf := make([]byte, smallArtifactSize) + for j := range buf { + buf[j] = byte((i*11 + j) & 0xFF) + } + rangeBytes[i] = buf + _, _ = hasher.Write(buf) + } + client := &stubCompoundClient{resp: &supernodepb.GetCompoundProofResponse{ + Ok: true, + RangeBytes: rangeBytes, + ProofHashHex: hex.EncodeToString(hasher.Sum(nil)), + }} + d, buf := newDispatcher(t, audit, &stubFactory{client: client}, tickets, meta) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.NotEmpty(t, results) + require.NotEmpty(t, client.requests) + for _, rng := range client.requests[0].Ranges { + require.NotNil(t, rng) + require.Equal(t, uint64(0), rng.Start) + require.Equal(t, uint64(smallArtifactSize), rng.End) + } + + var sawPass bool + for _, r := range results { + if r.TicketId == ticketID { + sawPass = true + require.Equal(t, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS, r.ResultClass) + require.Equal(t, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, r.ArtifactClass) + require.NotEmpty(t, r.DerivationInputHash) + require.NotEmpty(t, r.ChallengerSignature) + } + } + 
require.True(t, sawPass, "expected small artifact to produce a chain-valid PASS row") +} + +func TestDispatchEpoch_StoredSymbolSizeOverridesMetadataEstimate(t *testing.T) { + const epochID uint64 = 21 + const target = "sn-target" + anchor := makeAnchor(epochID, 200, target) + params := defaultParams(audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL) + params.StorageTruthCompoundRangeLenBytes = uint32(deterministic.LEP6CompoundRangeLenBytes) + audit := &dispatchAuditModule{ + params: &audittypes.QueryParamsResponse{Params: params}, + anchor: &audittypes.QueryEpochAnchorResponse{Anchor: anchor}, + assigned: &audittypes.QueryAssignedTargetsResponse{TargetSupernodeAccounts: []string{target}}, + } + + var ticketID string + for i := 0; i < 100; i++ { + candidate := "tkt-stored-symbol" + if i > 0 { + candidate = candidate + string(rune('a'+i)) + } + if deterministic.SelectArtifactClass(anchor.Seed, target, candidate, 1, 1) == audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL { + ticketID = candidate + break + } + } + require.NotEmpty(t, ticketID) + + tickets := stubTicketProvider{tickets: map[string][]TicketDescriptor{ + target: {{TicketID: ticketID, AnchorBlock: 100}}, + }} + meta := stubMetaProvider{ + meta: &actiontypes.CascadeMetadata{RqIdsIc: 0, RqIdsMax: 1, RqIdsIds: []string{"sym-0"}}, + // Metadata fallback would be 1025*1024 bytes, much larger than the + // compressed SYMBOL blob stored and served by runtime CASCADE. 
+ size: 1025, + } + + rangeBytes := make([][]byte, deterministic.LEP6CompoundRangesPerArtifact) + hasher := blake3.New(32, nil) + for i := range rangeBytes { + buf := make([]byte, deterministic.LEP6CompoundRangeLenBytes) + for j := range buf { + buf[j] = byte((i*13 + j) & 0xFF) + } + rangeBytes[i] = buf + _, _ = hasher.Write(buf) + } + client := &stubCompoundClient{resp: &supernodepb.GetCompoundProofResponse{ + Ok: true, + RangeBytes: rangeBytes, + ProofHashHex: hex.EncodeToString(hasher.Sum(nil)), + }} + d, buf := newDispatcher(t, audit, &stubFactory{client: client}, tickets, meta) + d.SetArtifactSizeProvider(stubArtifactSizeProvider{sizes: map[string]uint64{"sym-0": 2439}}) + + require.NoError(t, d.DispatchEpoch(context.Background(), epochID)) + results := buf.CollectResults(epochID) + require.NotEmpty(t, results) + require.NotEmpty(t, client.requests) + req := client.requests[0] + require.Equal(t, uint64(2439), req.ArtifactSize) + require.Equal(t, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, audittypes.StorageProofArtifactClass(req.ArtifactClass)) + for _, rng := range req.Ranges { + require.NotNil(t, rng) + require.LessOrEqual(t, rng.End, uint64(2439)) + require.Equal(t, uint64(deterministic.LEP6CompoundRangeLenBytes), rng.End-rng.Start) + } + + var sawPass bool + for _, r := range results { + if r.TicketId == ticketID { + sawPass = true + require.Equal(t, audittypes.StorageProofResultClass_STORAGE_PROOF_RESULT_CLASS_PASS, r.ResultClass) + require.Equal(t, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, r.ArtifactClass) + } + } + require.True(t, sawPass, "expected stored-size override to produce in-bounds PASS row") +} + func TestRecheck_GetParamsNilResponseIsClearAndDoesNotHoldDispatcherLock(t *testing.T) { var dispatcher *LEP6Dispatcher var sawUnlocked bool diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index de449a23..3458efb9 100644 --- 
a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -234,19 +234,33 @@ func (s *Service) Run(ctx context.Context) error { } anchor := anchorResp.Anchor + if shouldRunLEP6Dispatch(params) && s.lep6 != nil { + if err := s.lep6.DispatchEpoch(ctx, epochID); err != nil { + logtrace.Warn(ctx, "lep6 dispatch error", logtrace.Fields{ + "epoch_id": epochID, + "error": err.Error(), + }) + lastRunEpoch = epochID + lastRunOK = false + continue + } + } + challengers := deterministic.SelectChallengers(anchor.ActiveSupernodeAccounts, anchor.Seed, epochID, params.ScChallengersPerEpoch) if !containsString(challengers, s.identity) { if loggedNotSelectedEpoch != epochID { - logtrace.Debug(ctx, "storage challenge: not selected challenger; skipping", logtrace.Fields{ - "epoch_id": epochID, - "identity": s.identity, - "selected": len(challengers), - "sc_param": params.ScChallengersPerEpoch, + logtrace.Debug(ctx, "storage challenge: not selected challenger; legacy storage challenge skipped", logtrace.Fields{ + "epoch_id": epochID, + "identity": s.identity, + "selected": len(challengers), + "sc_param": params.ScChallengersPerEpoch, + "lep6_dispatch_mode": params.StorageTruthEnforcementMode.String(), }) loggedNotSelectedEpoch = epochID } lastRunEpoch = epochID lastRunOK = true + s.persistLastRunEpoch(ctx, epochID) continue } @@ -285,20 +299,6 @@ func (s *Service) Run(ctx context.Context) error { continue } - // LEP-6 compound dispatch runs alongside the legacy single-range - // challenge for forward compatibility. The dispatcher gates - // internally on StorageTruthEnforcementMode (UNSPECIFIED skips), - // so it is dormant under chains that have not enabled storage - // truth enforcement and a no-op cost otherwise. 
- if s.lep6 != nil { - if err := s.lep6.DispatchEpoch(ctx, epochID); err != nil { - logtrace.Warn(ctx, "lep6 dispatch error", logtrace.Fields{ - "epoch_id": epochID, - "error": err.Error(), - }) - } - } - lastRunEpoch = epochID lastRunOK = true s.persistLastRunEpoch(ctx, epochID) @@ -369,6 +369,10 @@ func (s *Service) auditParams(ctx context.Context) (audittypes.Params, bool) { return p, true } +func shouldRunLEP6Dispatch(params audittypes.Params) bool { + return params.StorageTruthEnforcementMode != audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED +} + func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, lookbackEpochs uint32, respTimeout time.Duration, affirmTimeout time.Duration) error { epochID := anchor.EpochId diff --git a/supernode/storage_challenge/service_metadata_test.go b/supernode/storage_challenge/service_metadata_test.go index 2a7528d9..d4141e95 100644 --- a/supernode/storage_challenge/service_metadata_test.go +++ b/supernode/storage_challenge/service_metadata_test.go @@ -37,3 +37,23 @@ func TestBuildStorageChallengeFailureEvidenceMetadata_NoMapPayload(t *testing.T) require.NoError(t, json.Unmarshal(bz, &roundtrip)) require.Equal(t, meta, roundtrip) } + +func TestLEP6DispatchShouldRunForActiveStorageTruthModes(t *testing.T) { + tests := []struct { + name string + mode audittypes.StorageTruthEnforcementMode + want bool + }{ + {name: "unspecified", mode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED, want: false}, + {name: "shadow", mode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SHADOW, want: true}, + {name: "soft", mode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_SOFT, want: true}, + {name: "full", mode: audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL, want: true}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + params := 
audittypes.Params{StorageTruthEnforcementMode: tc.mode} + require.Equal(t, tc.want, shouldRunLEP6Dispatch(params)) + }) + } +} diff --git a/tests/scripts/install-lumera.sh b/tests/scripts/install-lumera.sh index d11a450a..9b97f4aa 100755 --- a/tests/scripts/install-lumera.sh +++ b/tests/scripts/install-lumera.sh @@ -7,13 +7,35 @@ set -e # Exit immediately if a command exits with a non-zero status # ./install-lumera.sh v1.1.0 # installs this specific version # LUMERAD_BINARY=/path/to/binary ./install-lumera.sh # uses existing binary +install_file() { + local src="$1" + local dest="$2" + if [ -w "$(dirname "$dest")" ]; then + cp "$src" "$dest" + return 0 + fi + if command -v sudo >/dev/null 2>&1 && sudo -n true 2>/dev/null; then + sudo cp "$src" "$dest" + return 0 + fi + return 1 +} + install_binary() { local binary_path="$1" chmod +x "$binary_path" - sudo cp "$binary_path" /usr/local/bin/lumerad + + if ! install_file "$binary_path" /usr/local/bin/lumerad; then + local user_bin="${HOME:-$PWD}/bin" + mkdir -p "$user_bin" + cp "$binary_path" "$user_bin/lumerad" + chmod +x "$user_bin/lumerad" + export PATH="$user_bin:$PATH" + echo "Installed without sudo to $user_bin/lumerad" + fi # Verify installation - if which lumerad > /dev/null; then + if command -v lumerad > /dev/null; then echo "Installed: $(lumerad version 2>/dev/null || echo "unknown version")" else echo "Installation failed" @@ -33,32 +55,129 @@ fi # Support mode argument: 'latest-release' (default), 'latest-tag', or specific version MODE="${1:-latest-release}" +SOURCE_REF="" REPO="LumeraProtocol/lumera" GITHUB_API="https://api.github.com/repos/$REPO" +first_linux_amd64_asset_url() { + if command -v jq >/dev/null 2>&1; then + jq -r 'if type == "object" then ([.assets[]? 
| select(.name | test("linux_amd64.tar.gz$")) | .browser_download_url][0] // empty) else empty end' + else + grep -o '"browser_download_url"[[:space:]]*:[[:space:]]*"[^"]*linux_amd64\.tar\.gz[^"]*"' \ + | head -n 1 \ + | sed 's/.*"browser_download_url"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/' + fi +} + +release_download_url_for_tag() { + local tag="$1" + curl -s "$GITHUB_API/releases/tags/$tag" | first_linux_amd64_asset_url +} + +# Go pseudo-versions encode the source commit, not the release tag name. Map the +# trailing commit hash back to an actual GitHub release tag before looking for +# release assets. Example: v1.12.0-rc.0.20260506174431-80a4b00767a9 resolves to +# release tag v.1.12.0-rc2, whose asset is lumera_v.1.12.0-rc2_linux_amd64.tar.gz. +release_tag_for_pseudo_version() { + local version="$1" + local hash="${version##*-}" + if ! [[ "$hash" =~ ^[0-9a-f]{12,40}$ ]]; then + return 1 + fi + + # Prefer git refs over the GitHub tags API. The API can return a JSON + # error/string under CI rate limiting, which made jq fail with: + # "Cannot index string with string \"commit\"". ls-remote is stable for + # both lightweight tags and annotated tag peeled refs (refs/tags/^{}). + if command -v git >/dev/null 2>&1; then + local git_tag + git_tag=$(git ls-remote --tags "https://github.com/$REPO.git" \ + | awk -v h="$hash" ' + $1 ~ "^" h { + ref=$2 + sub(/^refs\/tags\//, "", ref) + sub(/\^\{\}$/, "", ref) + print ref + exit + }') + if [ -n "$git_tag" ]; then + echo "$git_tag" + return 0 + fi + fi + + # Fallback for environments without git. Guard on JSON shape so API errors + # yield an empty result instead of a jq type error. + if command -v jq >/dev/null 2>&1; then + curl -s -S -L "$GITHUB_API/tags?per_page=100" \ + | jq -r --arg h "$hash" 'if type == "array" then ([.[] | select((.commit.sha? 
// "") | startswith($h)) | .name][0] // empty) else empty end' + else + curl -s -S -L "$GITHUB_API/tags?per_page=100" \ + | python3 -c 'import json,sys; h=sys.argv[1]; +try: + tags=json.load(sys.stdin) +except Exception: + tags=[] +if not isinstance(tags, list): + tags=[] +print(next((t.get("name", "") for t in tags if isinstance(t, dict) and t.get("commit",{}).get("sha","").startswith(h)), ""))' "$hash" + fi +} + +pseudo_version_hash() { + local version="$1" + local hash="${version##*-}" + if [[ "$hash" =~ ^[0-9a-f]{12,40}$ ]]; then + echo "$hash" + fi +} + +build_lumerad_from_source_ref() { + local ref="$1" + local out="$2" + local src_dir="$3/lumera-src" + + if ! command -v git >/dev/null 2>&1; then + echo "Error: git is required to build lumerad from source" >&2 + return 1 + fi + if ! command -v "${GO:-go}" >/dev/null 2>&1; then + echo "Error: go is required to build lumerad from source" >&2 + return 1 + fi + + # Pseudo-versions can contain a shortened commit hash that is not fetchable + # directly by all servers. Use a normal clone so checkout can resolve the + # abbreviation from the repository object database. + git clone "https://github.com/$REPO.git" "$src_dir" >/dev/null + (cd "$src_dir" && git switch --detach "$ref" >/dev/null) + (cd "$src_dir" && "${GO:-go}" build -o "$out" ./cmd/lumera) +} + # Determine tag and download URL based on mode if [ "$MODE" == "latest-tag" ]; then if command -v jq >/dev/null 2>&1; then - TAG_NAME=$(curl -s "$GITHUB_API/tags" | jq -r '.[0].name') - DOWNLOAD_URL=$(curl -s "$GITHUB_API/releases/tags/$TAG_NAME" \ - | jq -r '.assets[]? 
| select(.name | test("linux_amd64.tar.gz$")) | .browser_download_url') + TAG_NAME=$(curl -s "$GITHUB_API/tags?per_page=1" | jq -r '.[0].name') else - TAG_NAME=$(curl -s "$GITHUB_API/tags" | grep '"name"' | head -n 1 | sed -E 's/.*"([^"]+)".*/\1/') - DOWNLOAD_URL=$(curl -s "$GITHUB_API/releases/tags/$TAG_NAME" \ - | grep -o '"browser_download_url"[[:space:]]*:[[:space:]]*"[^"]*linux_amd64\.tar\.gz[^"]*"' \ - | sed 's/.*"browser_download_url"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') + TAG_NAME=$(curl -s "$GITHUB_API/tags?per_page=1" | grep '"name"' | head -n 1 | sed -E 's/.*"([^"]+)".*/\1/') fi + DOWNLOAD_URL=$(release_download_url_for_tag "$TAG_NAME") -elif [[ "$MODE" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.*$ ]]; then +elif [[ "$MODE" =~ ^v\.?[0-9]+\.[0-9]+\.[0-9]+.*$ ]]; then TAG_NAME="$MODE" - if command -v jq >/dev/null 2>&1; then - DOWNLOAD_URL=$(curl -s "$GITHUB_API/releases/tags/$TAG_NAME" \ - | jq -r '.assets[]? | select(.name | test("linux_amd64.tar.gz$")) | .browser_download_url') - else - DOWNLOAD_URL=$(curl -s "$GITHUB_API/releases/tags/$TAG_NAME" \ - | grep -o '"browser_download_url"[[:space:]]*:[[:space:]]*"[^"]*linux_amd64\.tar\.gz[^"]*"' \ - | sed 's/.*"browser_download_url"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') + DOWNLOAD_URL=$(release_download_url_for_tag "$TAG_NAME") + if [ -z "$DOWNLOAD_URL" ]; then + RESOLVED_TAG=$(release_tag_for_pseudo_version "$MODE") + if [ -n "$RESOLVED_TAG" ]; then + TAG_NAME="$RESOLVED_TAG" + DOWNLOAD_URL=$(release_download_url_for_tag "$TAG_NAME") + else + SOURCE_REF=$(pseudo_version_hash "$MODE") + if [ -n "$SOURCE_REF" ]; then + TAG_NAME="$MODE" + fi + fi fi elif [ "$MODE" == "latest-release" ]; then @@ -67,40 +186,48 @@ elif [ "$MODE" == "latest-release" ]; then # Extract tag name and download URL if command -v jq >/dev/null 2>&1; then TAG_NAME=$(echo "$RELEASE_INFO" | jq -r '.tag_name') - DOWNLOAD_URL=$(echo "$RELEASE_INFO" | jq -r '.assets[]? 
| select(.name | test("linux_amd64.tar.gz$")) | .browser_download_url') else TAG_NAME=$(echo "$RELEASE_INFO" | grep -o '"tag_name"[[:space:]]*:[[:space:]]*"[^"]*"' | sed 's/.*"tag_name"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') - DOWNLOAD_URL=$(echo "$RELEASE_INFO" | grep -o '"browser_download_url"[[:space:]]*:[[:space:]]*"[^"]*linux_amd64\.tar\.gz[^"]*"' | sed 's/.*"browser_download_url"[[:space:]]*:[[:space:]]*"\([^"]*\)".*/\1/') fi + DOWNLOAD_URL=$(echo "$RELEASE_INFO" | first_linux_amd64_asset_url) else echo "Error: Invalid mode '$MODE'" - echo "Usage: $0 [latest-release|latest-tag|vX.Y.Z]" + echo "Usage: $0 [latest-release|latest-tag|vX.Y.Z|go-pseudo-version]" echo " or: LUMERAD_BINARY=/path/to/binary $0" exit 1 fi echo "Selected tag: $TAG_NAME" -# Validate that we have the required information -if [ -z "$TAG_NAME" ] || [ -z "$DOWNLOAD_URL" ]; then - echo "Error: Could not determine tag or download URL" +# Validate that we have either a release asset or a source ref to build. +if [ -z "$TAG_NAME" ] || { [ -z "$DOWNLOAD_URL" ] && [ -z "$SOURCE_REF" ]; }; then + echo "Error: Could not determine tag, download URL, or source ref" exit 1 fi -# Download and extract the release +# Download/extract release assets when available; otherwise build lumerad from +# the pseudo-version commit. Pseudo-version commits usually do not have GitHub +# release assets, but CI still needs the binary to match the Go module code. TEMP_DIR=$(mktemp -d) ORIG_DIR=$(pwd) -curl -L --progress-bar "$DOWNLOAD_URL" -o "$TEMP_DIR/lumera.tar.gz" -cd "$TEMP_DIR" -tar -xzf lumera.tar.gz -rm lumera.tar.gz +if [ -n "$DOWNLOAD_URL" ]; then + curl -L --progress-bar "$DOWNLOAD_URL" -o "$TEMP_DIR/lumera.tar.gz" + + cd "$TEMP_DIR" + tar -xzf lumera.tar.gz + rm lumera.tar.gz -# Install WASM library -WASM_LIB=$(find . -type f -name "libwasmvm*.so" -print -quit) -if [ -n "$WASM_LIB" ]; then - sudo cp "$WASM_LIB" /usr/lib/ + # Install WASM library when the release bundle contains one. + WASM_LIB=$(find . 
-type f -name "libwasmvm*.so" -print -quit) + if [ -n "$WASM_LIB" ]; then + install_file "$WASM_LIB" /usr/lib/$(basename "$WASM_LIB") || echo "Warning: could not install $(basename "$WASM_LIB"); continuing" + fi +else + echo "No release asset found for $MODE; building lumerad from source ref $SOURCE_REF" + build_lumerad_from_source_ref "$SOURCE_REF" "$TEMP_DIR/lumerad" "$TEMP_DIR" + cd "$TEMP_DIR" fi # Find and install lumerad binary @@ -108,7 +235,7 @@ LUMERAD_PATH=$(find . -type f -name "lumerad" -print -quit) if [ -n "$LUMERAD_PATH" ]; then install_binary "$LUMERAD_PATH" else - echo "Error: Could not find lumerad binary in the package" + echo "Error: Could not find lumerad binary" exit 1 fi diff --git a/tests/system/cli.go b/tests/system/cli.go index 633fe933..e62dfaca 100644 --- a/tests/system/cli.go +++ b/tests/system/cli.go @@ -39,7 +39,7 @@ func NewLumeradCLI(t *testing.T, sut *SystemUnderTest, verbose bool) *LumeradCli sut.AwaitNextBlock, sut.nodesCount, filepath.Join(WorkDir, sut.outputDir), - "1"+"ulume", + "1000"+"ulume", verbose, assert.NoError, true, diff --git a/tests/system/e2e_lep6_enforcement_lifecycle_test.go b/tests/system/e2e_lep6_enforcement_lifecycle_test.go new file mode 100644 index 00000000..f6301624 --- /dev/null +++ b/tests/system/e2e_lep6_enforcement_lifecycle_test.go @@ -0,0 +1,198 @@ +//go:build system_test + +package system + +import ( + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tidwall/sjson" +) + +// TestLEP6StorageTruthEnforcementLifecycle validates the storage-truth +// enforcement-state path on a real local lumerad chain. It deliberately uses +// deterministic storage-proof txs instead of opportunistic daemon timing so the +// test can prove the chain predicates directly: +// +// 1. a confirmed INDEX failure raises target node suspicion into postpone band; +// 2. epoch-end enforcement moves the target supernode to POSTPONED; +// 3. 
postponed target remains challengeable by active reporters; +// 4. clean PASS proofs reduce suspicion below watch and satisfy clean-pass recovery; +// 5. epoch-end enforcement recovers the supernode to ACTIVE. +func TestLEP6StorageTruthEnforcementLifecycle(t *testing.T) { + os.Setenv("INTEGRATION_TEST", "true") + defer os.Unsetenv("INTEGRATION_TEST") + + const ( + epochLengthBlocks = uint64(8) + originHeight = int64(1) + ticketID = "sys-test-ticket-soft-postpone" + ) + + t.Log("Storage-truth enforcement Step 1: start real chain with low enforcement thresholds") + sut.ModifyGenesisJSON(t, + SetStakingBondDenomUlume(t), + SetActionParams(t), + SetSupernodeMetricsParams(t), + setSupernodeParamsForAuditTests(t), + setAuditParamsForFastEpochs(t, epochLengthBlocks, 1, 1, 1, []uint32{4444}), + setAuditMissingReportGraceForRuntimeE2E(t), + // FULL mode submits RECENT+OLD INDEX HASH_MISMATCH results for two distinct + // tickets. Together they contribute at least +104 node suspicion and also + // satisfy the chain's distinct-ticket postpone predicate. Use production + // decay semantics, compressed to local epochs, so recovery proves both clean + // PASS credit and decay-adjusted suspicion falling below watch. 
+ setStorageTruthTestParams(t, "STORAGE_TRUTH_ENFORCEMENT_MODE_FULL", 104, 100, 1000, 920, 10), + setStorageTruthEnforcementRecoveryParams(t, 1, 1), + ) + sut.StartChain(t) + cli := NewLumeradCLI(t, sut, true) + + n0 := getNodeIdentity(t, cli, "node0") + n1 := getNodeIdentity(t, cli, "node1") + n2 := getNodeIdentity(t, cli, "node2") + nodes := []testNodeIdentity{n0, n1, n2} + registerSupernode(t, cli, n0, "localhost:4444") + registerSupernode(t, cli, n1, "localhost:4446") + registerSupernode(t, cli, n2, "localhost:4448") + bootstrapRuntimeSupernodeEligibility(t, cli) + + t.Log("Storage-truth enforcement Step 2: submit deterministic INDEX HASH_MISMATCH across two assigned epochs") + failEpochID, failEpochEnd := nextUsableEpoch(t, originHeight, epochLengthBlocks) + awaitCurrentEpochAnchorWithActiveSupernodes(t, failEpochID, n0.accAddr, n1.accAddr, n2.accAddr) + target := selectAssignedStorageTruthTarget(t, failEpochID, nodes) + seedProofTranscriptsWithClass( + t, + cli, + failEpochID, + nodes, + target.accAddr, + []transcriptSeed{{ticketID: ticketID + "-a", transcriptHash: "storage-truth-index-hash-mismatch-a"}}, + true, + "STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH", + ) + awaitAtLeastHeight(t, failEpochEnd) + sut.AwaitNextBlock(t) + + failEpochID, failEpochEnd = nextEpochWithAssignedStorageTruthTarget(t, originHeight, epochLengthBlocks, nodes, target.accAddr) + seedProofTranscriptsWithClass( + t, + cli, + failEpochID, + nodes, + target.accAddr, + []transcriptSeed{{ticketID: ticketID + "-b", transcriptHash: "storage-truth-index-hash-mismatch-b"}}, + true, + "STORAGE_PROOF_RESULT_CLASS_HASH_MISMATCH", + ) + + nodeState, found := auditQueryNodeSuspicionStateST(t, target.accAddr) + require.True(t, found, "HASH_MISMATCH must create node suspicion state for target") + require.GreaterOrEqual(t, nodeState.SuspicionScore, int64(104), "target suspicion must reach postpone threshold") + require.Equal(t, uint32(4), nodeState.ClassACountWindow, "FULL-mode INDEX HASH_MISMATCH must 
count RECENT+OLD Class-A failures for both tickets") + require.Equal(t, uint32(0), nodeState.CleanPassCount, "failure must reset/avoid clean-pass recovery credit") + + t.Log("Storage-truth enforcement Step 3: epoch-end enforcement postpones the suspected target") + awaitAtLeastHeight(t, failEpochEnd) + sut.AwaitNextBlock(t) + require.Eventually(t, func() bool { + return querySupernodeLatestState(t, cli, target.valAddr) == "SUPERNODE_STATE_POSTPONED" + }, 45*time.Second, time.Second, "target supernode must become POSTPONED after suspicion threshold crossing") + + t.Log("Storage-truth enforcement Step 4: submit clean PASS proofs while target is postponed") + passCandidates := storageTruthCandidatesExcept(nodes, target.accAddr) + passEpochID, passEpochEnd := nextEpochWithAssignedStorageTruthTarget(t, originHeight, epochLengthBlocks, passCandidates, target.accAddr) + seedProofTranscriptsWithClass( + t, + cli, + passEpochID, + passCandidates, + target.accAddr, + []transcriptSeed{{ticketID: ticketID + "-a", transcriptHash: "storage-truth-clean-pass-a"}}, + true, + "STORAGE_PROOF_RESULT_CLASS_PASS", + ) + + recoveredScore, found := auditQueryNodeSuspicionStateST(t, target.accAddr) + require.True(t, found) + require.GreaterOrEqual(t, recoveredScore.CleanPassCount, uint32(1), "PASS pair must satisfy configured clean-pass recovery requirement") + + t.Log("Storage-truth enforcement Step 5: epoch-end enforcement recovers the clean target after decay-adjusted suspicion falls below watch") + // Raw query state is only updated on score writes; enforcement applies the + // chain-authoritative decay-adjusted read at epoch end. Advance enough fast + // local epochs for score decay to move below the watch threshold, then assert + // the postponed node recovers to ACTIVE. 
+ awaitAtLeastHeight(t, passEpochEnd+int64(epochLengthBlocks*6), 2*time.Minute) + sut.AwaitNextBlock(t) + require.Eventually(t, func() bool { + return querySupernodeLatestState(t, cli, target.valAddr) == "SUPERNODE_STATE_ACTIVE" + }, 45*time.Second, time.Second, "clean target must recover to ACTIVE after score and clean-pass predicates are satisfied") +} + +func nextUsableEpoch(t *testing.T, originHeight int64, epochLengthBlocks uint64) (uint64, int64) { + t.Helper() + currentHeight := sut.AwaitNextBlock(t) + epochID, epochStart := nextEpochAfterHeight(originHeight, epochLengthBlocks, currentHeight) + awaitAtLeastHeight(t, epochStart) + return epochID, epochStart + int64(epochLengthBlocks) +} + +func selectAssignedStorageTruthTarget(t *testing.T, epochID uint64, candidates []testNodeIdentity) testNodeIdentity { + t.Helper() + byAccount := make(map[string]testNodeIdentity, len(candidates)) + for _, c := range candidates { + byAccount[c.accAddr] = c + } + for _, reporter := range candidates { + resp := auditQueryAssignedTargets(t, epochID, true, reporter.accAddr) + for _, targetAcct := range resp.TargetSupernodeAccounts { + if target, ok := byAccount[targetAcct]; ok && target.accAddr != reporter.accAddr { + return target + } + } + } + require.Failf(t, "no assigned storage-truth target", "no candidate had an assigned target in epoch %d", epochID) + return testNodeIdentity{} +} + +func storageTruthCandidatesExcept(candidates []testNodeIdentity, account string) []testNodeIdentity { + out := make([]testNodeIdentity, 0, len(candidates)-1) + for _, c := range candidates { + if c.accAddr != account { + out = append(out, c) + } + } + return out +} + +func nextEpochWithAssignedStorageTruthTarget(t *testing.T, originHeight int64, epochLengthBlocks uint64, candidates []testNodeIdentity, targetAcct string) (uint64, int64) { + t.Helper() + for attempt := 0; attempt < 8; attempt++ { + epochID, epochEnd := nextUsableEpoch(t, originHeight, epochLengthBlocks) + for _, reporter := 
range candidates { + resp := auditQueryAssignedTargets(t, epochID, true, reporter.accAddr) + for _, assigned := range resp.TargetSupernodeAccounts { + if assigned == targetAcct { + return epochID, epochEnd + } + } + } + awaitAtLeastHeight(t, epochEnd) + sut.AwaitNextBlock(t) + } + require.Failf(t, "target not reassigned", "postponed target %s was not assigned to active reporters within 8 epochs", targetAcct) + return 0, 0 +} + +func setStorageTruthEnforcementRecoveryParams(t *testing.T, cleanPasses, strongCleanPasses uint32) GenesisMutator { + return func(genesis []byte) []byte { + t.Helper() + state, err := sjson.SetBytes(genesis, "app_state.audit.params.storage_truth_recovery_clean_pass_count", cleanPasses) + require.NoError(t, err) + state, err = sjson.SetBytes(state, "app_state.audit.params.storage_truth_strong_recovery_clean_pass_count", strongCleanPasses) + require.NoError(t, err) + return state + } +} diff --git a/tests/system/e2e_lep6_runtime_test.go b/tests/system/e2e_lep6_runtime_test.go index 09043370..5fd708c2 100644 --- a/tests/system/e2e_lep6_runtime_test.go +++ b/tests/system/e2e_lep6_runtime_test.go @@ -243,6 +243,12 @@ func requireFinalizedCascadeArtifactCounts(t *testing.T, ctx context.Context, cl func recoverChainKey(t *testing.T, binaryPath, homePath, keyName, mnemonic string) { t.Helper() + // The system-test lumerad home is reused across package tests and local reruns. + // Ensure this helper is deterministic instead of silently reusing a stale key + // with the same name from an earlier test run. 
+ deleteCmd := exec.Command(binaryPath, "keys", "delete", keyName, "--yes", "--keyring-backend=test", "--home", homePath) + _ = deleteCmd.Run() + cmd := exec.Command(binaryPath, "keys", "add", keyName, "--recover", "--keyring-backend=test", "--home", homePath) cmd.Stdin = strings.NewReader(mnemonic + "\n") out, err := cmd.CombinedOutput() diff --git a/tests/system/go.sum b/tests/system/go.sum index 6c90ab12..a721fde4 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -107,7 +107,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.12.0 h1:BHkPF/vCKyGFKtl2MMxtRpUyzraJ96rWY9FniTbG6cU= +github.com/LumeraProtocol/lumera v1.12.0 h1:ZtGvnwuwOYbbveV21581D6LbMhy9KOVbDAtmck7VAyY= github.com/LumeraProtocol/lumera v1.12.0/go.mod h1:/G9LTPZB+261tHoWoj7q+1fn+O/VV0zzagwLdsThSNo= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= diff --git a/tests/system/main_test.go b/tests/system/main_test.go index 67c97efa..4df1cbe1 100644 --- a/tests/system/main_test.go +++ b/tests/system/main_test.go @@ -96,6 +96,11 @@ func requireEnoughFileHandlers(nodesCount int) { func initSDKConfig(bech32Prefix string) { config := sdk.GetConfig() + if config.GetBech32AccountAddrPrefix() == bech32Prefix && + config.GetBech32ValidatorAddrPrefix() == bech32Prefix+sdk.PrefixValidator+sdk.PrefixOperator && + config.GetBech32ConsensusAddrPrefix() == 
bech32Prefix+sdk.PrefixValidator+sdk.PrefixConsensus { + return + } config.SetBech32PrefixForAccount(bech32Prefix, bech32Prefix+sdk.PrefixPublic) config.SetBech32PrefixForValidator(bech32Prefix+sdk.PrefixValidator+sdk.PrefixOperator, bech32Prefix+sdk.PrefixValidator+sdk.PrefixOperator+sdk.PrefixPublic) config.SetBech32PrefixForConsensusNode(bech32Prefix+sdk.PrefixValidator+sdk.PrefixConsensus, bech32Prefix+sdk.PrefixValidator+sdk.PrefixConsensus+sdk.PrefixPublic)