Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -183,7 +183,7 @@ INSTALL_MODE ?= $(if $(LUMERA_DEFAULT_VERSION),$(LUMERA_DEFAULT_VERSION),latest-
install-lumera:
@echo "Installing Lumera..."
@chmod +x tests/scripts/install-lumera.sh
@sudo LUMERAD_BINARY="$(LUMERAD_BINARY)" tests/scripts/install-lumera.sh $(INSTALL_MODE)
@LUMERAD_BINARY="$(LUMERAD_BINARY)" tests/scripts/install-lumera.sh $(INSTALL_MODE)
@echo "PtTDUHythfRfXHh63yzyiGDid4TZj2P76Zd,18749999981413" > ~/claims.csv

# Setup supernode environments
Expand Down
2 changes: 1 addition & 1 deletion go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/LumeraProtocol/lumera v1.12.0 h1:BHkPF/vCKyGFKtl2MMxtRpUyzraJ96rWY9FniTbG6cU=
github.com/LumeraProtocol/lumera v1.12.0 h1:ZtGvnwuwOYbbveV21581D6LbMhy9KOVbDAtmck7VAyY=
github.com/LumeraProtocol/lumera v1.12.0/go.mod h1:/G9LTPZB+261tHoWoj7q+1fn+O/VV0zzagwLdsThSNo=
github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4=
github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8=
Expand Down
2 changes: 1 addition & 1 deletion pkg/lumera/chainerrors/chainerrors_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ import (

// abciErr re-creates an error with typed sentinel preserved across %w wrap,
// matching the production wrap path in pkg/lumera/modules/tx/impl.go after
// the Wave 0 boundary fix.
// the LEP-6 boundary fix.
func abciErr(sentinel *errorsmod.Error, rawLog string) error {
return fmt.Errorf("tx failed: code=%d codespace=%s height=0 gas_wanted=0 gas_used=0 raw_log=%s: %w",
sentinel.ABCICode(), sentinel.Codespace(), rawLog, sentinel)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ func TestMigrateStorageRecheckSubmissionsPK(t *testing.T) {
defer db.Close()
ctx := context.Background()

// Seed the OLD schema (pre-Wave-1 PK).
// Seed the OLD schema (pre-fix PK).
const oldSchema = `
CREATE TABLE storage_recheck_submissions (
epoch_id INTEGER NOT NULL,
Expand Down
6 changes: 3 additions & 3 deletions pkg/storage/queries/recheck_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,10 @@ import (
"github.com/stretchr/testify/require"
)

// TestRecheckSubmissionDedupPerTarget asserts the Wave 1 / C2 fix: chain
// TestRecheckSubmissionDedupPerTarget asserts the LEP-6 C2 fix: chain
// dedup is per-(epoch, ticket, target_account), so two distinct targets
// within the same (epoch, ticket) must produce two persisted rows. Before
// Wave 1, the PK was (epoch, ticket) and the second target's row was
// the LEP-6 fix, the PK was (epoch, ticket) and the second target's row was
// silently dropped — masking that supernode from chain N/R/D math.
func TestRecheckSubmissionDedupPerTarget(t *testing.T) {
db := sqlx.MustConnect("sqlite3", ":memory:")
Expand Down Expand Up @@ -63,7 +63,7 @@ func TestRecheckSubmissionDedupPerTarget(t *testing.T) {
}

// TestRecordPendingRecheckSubmission_DuplicateReturnsTypedError covers the
// Wave 1 / L3 fix: duplicate-pending writes used to be silently swallowed
// LEP-6 L3 fix: duplicate-pending writes used to be silently swallowed
// by `INSERT OR IGNORE`; they now return ErrLEP6RecheckAlreadyRecorded so
// the attestor can branch on it.
func TestRecordPendingRecheckSubmission_DuplicateReturnsTypedError(t *testing.T) {
Expand Down
4 changes: 2 additions & 2 deletions pkg/storage/queries/self_healing_lep6_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ func TestLEP6HealClaimPendingLifecycle(t *testing.T) {
ctx := context.Background()

require.NoError(t, store.RecordPendingHealClaim(ctx, 101, "ticket-101", "manifest", "/tmp/stage"))
// Wave 2 / C5 fix: HasHealClaim returns SUBMITTED-only. A pending row
// LEP-6 C5 fix: HasHealClaim returns SUBMITTED-only. A pending row
// must not block fresh dispatch.
has, err := store.HasHealClaim(ctx, 101)
require.NoError(t, err)
Expand Down Expand Up @@ -124,7 +124,7 @@ func TestLEP6HealVerificationPendingLifecycle(t *testing.T) {
ctx := context.Background()

require.NoError(t, store.RecordPendingHealVerification(ctx, 202, "verifier-a", true, "hash"))
// Wave 2 / C5 fix: pending must NOT count as submitted.
// LEP-6 C5 fix: pending must NOT count as submitted.
has, err := store.HasHealVerification(ctx, 202, "verifier-a")
require.NoError(t, err)
require.False(t, has, "pending row must NOT count as submitted (C5)")
Expand Down
18 changes: 13 additions & 5 deletions pkg/storagechallenge/deterministic/lep6.go
Original file line number Diff line number Diff line change
Expand Up @@ -494,8 +494,10 @@ func SelectArtifactOrdinal(seed []byte, target, ticketID string, class audittype
// passed explicitly so a future param change at the chain level can be
// surfaced cleanly. The returned slice has length exactly k.
//
// Returns an error if rangeLen >= artifactSize (would yield negative modulus
// space) or if any input is degenerate (k=0, empty class).
// Returns an error if rangeLen > artifactSize (out-of-bounds range),
// artifactSize is zero, or any input is degenerate (k=0, empty class). When
// rangeLen == artifactSize, the whole artifact is challenged and every offset
// is deterministically 0.
//
// IMPORTANT: u32be(ordinal) and u32be(i) are written as raw 4-byte
// big-endian integers, not as decimal-string forms — this keeps the byte
Expand All @@ -505,18 +507,24 @@ func ComputeMultiRangeOffsets(seed []byte, target, ticketID string, class auditt
if k <= 0 {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: k must be > 0")
}
if artifactSize == 0 {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: artifactSize must be > 0")
}
if rangeLen == 0 {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: rangeLen must be > 0")
}
if artifactSize <= rangeLen {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: artifactSize (%d) must be > rangeLen (%d)", artifactSize, rangeLen)
if rangeLen > artifactSize {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: rangeLen (%d) must be <= artifactSize (%d)", rangeLen, artifactSize)
}
classDomain := ArtifactClassDomain(class)
if classDomain == "" {
return nil, fmt.Errorf("deterministic.ComputeMultiRangeOffsets: unsupported class %v", class)
}
span := artifactSize - rangeLen
offsets := make([]uint64, k)
if rangeLen == artifactSize {
return offsets, nil
}
span := artifactSize - rangeLen
var ordBuf, idxBuf [4]byte
binary.BigEndian.PutUint32(ordBuf[:], ordinal)
for i := 0; i < k; i++ {
Expand Down
31 changes: 31 additions & 0 deletions pkg/storagechallenge/deterministic/lep6_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -384,6 +384,37 @@ func TestComputeMultiRangeOffsets_AllInBounds(t *testing.T) {
}
}

// TestComputeMultiRangeOffsets_WholeArtifactWhenRangeEqualsSize covers the
// rangeLen == artifactSize boundary: the whole artifact is challenged, so
// every derived offset must be deterministically zero.
func TestComputeMultiRangeOffsets_WholeArtifactWhenRangeEqualsSize(t *testing.T) {
	const artifactSize = uint64(103)
	offsets, err := ComputeMultiRangeOffsets(chainSeed, "sn-target", "ticket-small",
		audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL, 0, artifactSize, artifactSize, 4)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if got := len(offsets); got != 4 {
		t.Fatalf("expected 4 offsets, got %d", got)
	}
	for idx, off := range offsets {
		if off != 0 {
			t.Fatalf("offset %d = %d, want 0 for whole-artifact challenge", idx, off)
		}
	}
}

// TestComputeMultiRangeOffsets_RejectsOutOfBoundsRangeLen asserts that a
// rangeLen larger than artifactSize is rejected with a non-nil error.
func TestComputeMultiRangeOffsets_RejectsOutOfBoundsRangeLen(t *testing.T) {
	class := audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL
	// rangeLen (256) deliberately exceeds artifactSize (100).
	_, err := ComputeMultiRangeOffsets(chainSeed, "x", "t", class, 0, 100, 256, 4)
	if err == nil {
		t.Fatal("expected error when rangeLen exceeds artifactSize")
	}
}

// TestComputeMultiRangeOffsets_RejectsZeroArtifactSize asserts that a zero
// artifactSize is treated as degenerate input and returns a non-nil error.
func TestComputeMultiRangeOffsets_RejectsZeroArtifactSize(t *testing.T) {
	class := audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL
	_, err := ComputeMultiRangeOffsets(chainSeed, "x", "t", class, 0, 0, 0, 4)
	if err == nil {
		t.Fatal("expected error for zero artifactSize")
	}
}

func TestComputeMultiRangeOffsets_OffsetsDistinctOnDifferentInputs(t *testing.T) {
const size, rl = uint64(10000), uint64(256)
a, _ := ComputeMultiRangeOffsets(chainSeed, "sn-target", "ticket-1",
Expand Down
46 changes: 27 additions & 19 deletions pkg/storagechallenge/lep6_resolution.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,26 +34,32 @@ var ErrUnspecifiedArtifactClass = errors.New("storagechallenge: artifact class i
// replaces a chain GetTicketArtifactCount RPC that does not exist (LEP-6 v2
// plan §9, Resolved Decision 8).
//
// Semantics:
// - INDEX -> uint32(meta.RqIdsIc)
// - SYMBOL -> uint32(len(meta.RqIdsIds))
// Semantics mirror Lumera chain action metadata exactly via
// actiontypes.CascadeArtifactCountsWithFallbackStrict:
// - INDEX -> meta.IndexArtifactCount, falling back to len(meta.RqIdsIds)
// - SYMBOL -> meta.SymbolArtifactCount, falling back to len(meta.RqIdsIds)
// - UNSPECIFIED -> error
//
// If both counts are zero (legacy / malformed ticket), this returns (0, nil)
// because the chain accepts that case via its TicketArtifactCountState fallback
// path (x/audit/v1/keeper/msg_submit_epoch_report_storage_proofs.go). Callers
// decide whether to skip such a ticket.
// The strict Lumera helper rejects malformed metadata where explicit counts are
// missing and the fallback universe is empty. This keeps supernode proof rows
// aligned with chain validation instead of maintaining duplicate count logic.
func ResolveArtifactCount(meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass) (uint32, error) {
if meta == nil {
return 0, errors.New("storagechallenge: nil cascade metadata")
}
switch class {
case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX:
return uint32(meta.RqIdsIc), nil
case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL:
return uint32(len(meta.RqIdsIds)), nil
case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED:
return 0, ErrUnspecifiedArtifactClass
case audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX,
audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL:
idx, sym, err := actiontypes.CascadeArtifactCountsWithFallbackStrict(meta)
if err != nil {
return 0, fmt.Errorf("storagechallenge: resolve canonical cascade artifact counts: %w", err)
}
if class == audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX {
return idx, nil
}
return sym, nil
default:
return 0, fmt.Errorf("storagechallenge: unknown artifact class %v", class)
}
Expand Down Expand Up @@ -102,17 +108,19 @@ func ResolveArtifactKey(meta *actiontypes.CascadeMetadata, class audittypes.Stor
}
}

// ResolveArtifactSize returns the exact byte size used to derive LEP-6
// multi-range offsets for a selected artifact.
//
// SYMBOL artifacts are RaptorQ symbols. The exact symbol size is derived from
// the finalized Action.FileSizeKbs and meta.RqIdsMax:
// ResolveArtifactSize returns the deterministic metadata-derived byte-size
// fallback used to derive LEP-6 multi-range offsets for a selected artifact.
//
// symbolSize = ceil(fileSizeKbs*1024 / meta.RqIdsMax)
// Production dispatch should prefer the stored blob size returned by the same
// storage layer used by the recipient ArtifactReader. SYMBOL artifacts are
// compressed/content-addressed RaptorQ blobs, and their served byte length can
// differ from the logical ceil(fileSizeKbs*1024 / meta.RqIdsMax) estimate.
// This fallback remains useful for tests and for deployments where the local
// store cannot report the selected artifact size.
//
// INDEX artifacts are generated deterministically from meta.Signatures,
// meta.RqIdsIc, and meta.RqIdsMax; their exact compressed byte length is the
// length of the selected generated index file.
// meta.RqIdsIc, and meta.RqIdsMax; their fallback byte length is the length of
// the selected generated index file.
func ResolveArtifactSize(act *actiontypes.Action, meta *actiontypes.CascadeMetadata, class audittypes.StorageProofArtifactClass, ordinal uint32) (uint64, error) {
if act == nil {
return 0, errors.New("storagechallenge: nil action")
Expand Down
37 changes: 27 additions & 10 deletions pkg/storagechallenge/lep6_resolution_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,44 +16,61 @@ func TestMaxStorageProofResultsPerReportTracksChainConstant(t *testing.T) {

func TestResolveArtifactCount_Index_Symbol_Unspecified(t *testing.T) {
meta := &actiontypes.CascadeMetadata{
RqIdsIc: 7,
RqIdsMax: 12,
RqIdsIds: []string{"a", "b", "c", "d"},
RqIdsIc: 7,
RqIdsMax: 12,
RqIdsIds: []string{"a", "b", "c", "d"},
IndexArtifactCount: 50,
SymbolArtifactCount: 60,
}

gotIdx, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX)
if err != nil {
t.Fatalf("INDEX: unexpected error: %v", err)
}
if gotIdx != 7 {
t.Fatalf("INDEX count: want 7, got %d", gotIdx)
if gotIdx != 50 {
t.Fatalf("INDEX count: want chain-canonical 50, got %d", gotIdx)
}

gotSym, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL)
if err != nil {
t.Fatalf("SYMBOL: unexpected error: %v", err)
}
if gotSym != 4 {
t.Fatalf("SYMBOL count: want 4, got %d", gotSym)
if gotSym != 60 {
t.Fatalf("SYMBOL count: want chain-canonical 60, got %d", gotSym)
}

if _, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_UNSPECIFIED); err == nil {
t.Fatalf("UNSPECIFIED: expected error, got nil")
}
}

// TestResolveArtifactCount_UsesLumeraCanonicalFallback checks that when no
// explicit IndexArtifactCount is set, the INDEX count falls back to the
// RqIdsIds universe size, mirroring the canonical Lumera helper.
func TestResolveArtifactCount_UsesLumeraCanonicalFallback(t *testing.T) {
	const fallbackUniverse = 50
	meta := &actiontypes.CascadeMetadata{
		RqIdsIc:  11,
		RqIdsIds: make([]string, fallbackUniverse),
	}

	count, err := ResolveArtifactCount(meta, audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX)
	if err != nil {
		t.Fatalf("INDEX: unexpected error: %v", err)
	}
	if count != fallbackUniverse {
		t.Fatalf("INDEX count must mirror Lumera CascadeArtifactCountsWithFallbackStrict fallback: want 50, got %d", count)
	}
}

func TestResolveArtifactCount_LegacyZero(t *testing.T) {
meta := &actiontypes.CascadeMetadata{} // both INDEX (RqIdsIc) and SYMBOL (len(RqIdsIds)) are zero
for _, class := range []audittypes.StorageProofArtifactClass{
audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_INDEX,
audittypes.StorageProofArtifactClass_STORAGE_PROOF_ARTIFACT_CLASS_SYMBOL,
} {
got, err := ResolveArtifactCount(meta, class)
if err != nil {
t.Fatalf("class=%v: legacy zero should not error, got: %v", class, err)
if err == nil {
t.Fatalf("class=%v: malformed empty metadata should error", class)
}
if got != 0 {
t.Fatalf("class=%v: want 0, got %d", class, got)
t.Fatalf("class=%v: errored metadata should return 0, got %d", class, got)
}
}
}
Expand Down
6 changes: 4 additions & 2 deletions supernode/cmd/start.go
Original file line number Diff line number Diff line change
Expand Up @@ -229,8 +229,9 @@ The supernode will connect to the Lumera network and begin participating in the
hostReporter.SetProofResultProvider(resultBuffer)
}

artifactReader := storageChallengeService.NewP2PArtifactReader(p2pService)
storageChallengeServer := storageChallengeRPC.NewServer(appConfig.SupernodeConfig.Identity, p2pService, historyStore).
WithArtifactReader(newP2PArtifactReader(p2pService)).
WithArtifactReader(artifactReader).
WithRecipientSigner(kr, appConfig.SupernodeConfig.KeyName).
WithAuditParams(lumeraClient.Audit())
var storageChallengeRunner *storageChallengeService.Service
Expand Down Expand Up @@ -263,12 +264,13 @@ The supernode will connect to the Lumera network and begin participating in the
appConfig.SupernodeConfig.Identity,
storageChallengeService.NewSecureSupernodeClientFactory(lumeraClient, kr, appConfig.SupernodeConfig.Identity, appConfig.SupernodeConfig.Port),
storageChallengeService.NewChainTicketProvider(lumeraClient),
newCascadeMetaProvider(lumeraClient),
storageChallengeService.NewCascadeMetaProvider(lumeraClient),
resultBuffer,
)
if derr != nil {
logtrace.Fatal(ctx, "Failed to initialize LEP-6 dispatcher", logtrace.Fields{"error": derr.Error()})
}
dispatcher.SetArtifactSizeProvider(artifactReader)
storageChallengeRunner.SetLEP6Dispatcher(dispatcher)

if appConfig.StorageChallengeConfig.LEP6.Recheck.Enabled {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ import (
"testing"
)

// Wave 4 — LEP-6 PR286 review fix regression tests.
// LEP-6 PR286 review-fix regression tests.
//
// Coverage:
// - C1: missing-block default for LEP-6 toggles is FALSE (no silent
Expand All @@ -16,7 +16,7 @@ import (
// cases (wrong-direction default would cause auto-opt-in) and the
// advisory helper.
// - L6: structural validator rejects recheck=true with disabled parents.
// Pre-Wave-4, fixtures could carry recheck.enabled=true while
// Before this fix, fixtures could carry recheck.enabled=true while
// storage_challenge.enabled=false, silently no-op'd at runtime.

func TestLoadConfig_C1_MissingBlocksDefaultDisabled(t *testing.T) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ import (
audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types"
)

// Wave 4 — LEP-6 review M6 (Matee, 2026-05-06). probeTCP must distinguish
// LEP-6 review M6 (Matee, 2026-05-06). probeTCP must distinguish
// canonical CLOSED (ECONNREFUSED) from operator-side faults (DNS, host
// unreach, ctx errors, timeouts) which now report UNKNOWN.

Expand Down Expand Up @@ -64,7 +64,7 @@ func TestProbeTCP_M6_RefusedReturnsClosed(t *testing.T) {
}
}

// TestProbeTCP_M6_DNSFailureReturnsUnknown — pre-Wave-4 a DNS resolution
// TestProbeTCP_M6_DNSFailureReturnsUnknown — before this fix a DNS resolution
// failure mapped to CLOSED, falsely accusing the peer's port of being shut.
// Now must map to UNKNOWN.
func TestProbeTCP_M6_DNSFailureReturnsUnknown(t *testing.T) {
Expand Down
17 changes: 13 additions & 4 deletions supernode/host_reporter/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,16 @@ func (s *Service) tick(ctx context.Context) {
var storageProofResults []*audittypes.StorageProofResult
if proofResultProvider := s.getProofResultProvider(); proofResultProvider != nil {
storageProofResults = proofResultProvider.CollectResults(epochID)
if s.fullModeStorageProofCoverageRequired(tickCtx) {
mode, modeOK := s.storageTruthEnforcementMode(tickCtx)
if modeOK && mode != audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED && len(assignResp.TargetSupernodeAccounts) > 0 && len(storageProofResults) == 0 {
logtrace.Warn(tickCtx, "epoch report skipped: waiting for LEP-6 storage proof results", logtrace.Fields{
"epoch_id": epochID,
"assigned_targets": len(assignResp.TargetSupernodeAccounts),
"mode": mode.String(),
})
return
}
if modeOK && mode == audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL {
complete, reason := storageProofCoverageComplete(storageProofResults, assignResp.TargetSupernodeAccounts)
if !complete {
if requeuer, ok := proofResultProvider.(ProofResultRequeuer); ok {
Expand Down Expand Up @@ -231,12 +240,12 @@ func (s *Service) tick(ctx context.Context) {
})
}

func (s *Service) fullModeStorageProofCoverageRequired(ctx context.Context) bool {
func (s *Service) storageTruthEnforcementMode(ctx context.Context) (audittypes.StorageTruthEnforcementMode, bool) {
paramsResp, err := s.lumera.Audit().GetParams(ctx)
if err != nil || paramsResp == nil {
return false
return audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_UNSPECIFIED, false
}
return paramsResp.Params.StorageTruthEnforcementMode == audittypes.StorageTruthEnforcementMode_STORAGE_TRUTH_ENFORCEMENT_MODE_FULL
return paramsResp.Params.StorageTruthEnforcementMode, true
}

func storageProofCoverageComplete(results []*audittypes.StorageProofResult, targets []string) (bool, string) {
Expand Down
Loading