From 6f3eebffe218ec06b27b476b0e35e754653b741f Mon Sep 17 00:00:00 2001 From: j-rafique Date: Fri, 6 Feb 2026 21:07:37 +0500 Subject: [PATCH 1/8] audit: module --- Makefile | 3 +- gen/supernode/service.pb.go | 2 +- gen/supernode/service.pb.gw.go | 644 +++++++----------- gen/supernode/status.pb.go | 7 +- gen/supernode/storage_challenge.pb.go | 533 +++++++++++++++ gen/supernode/storage_challenge.swagger.json | 109 +++ gen/supernode/storage_challenge_grpc.pb.go | 164 +++++ go.mod | 5 +- pkg/lumera/Readme.md | 3 + pkg/lumera/client.go | 33 + pkg/lumera/interface.go | 4 + pkg/lumera/lumera_mock.go | 30 + pkg/lumera/modules/audit/impl.go | 75 ++ pkg/lumera/modules/audit/interface.go | 23 + .../modules/audit_msg/audit_msg_mock.go | 73 ++ pkg/lumera/modules/audit_msg/impl.go | 101 +++ pkg/lumera/modules/audit_msg/interface.go | 31 + .../deterministic/deterministic.go | 160 +++++ .../deterministic/deterministic_test.go | 46 ++ pkg/testutil/lumera.go | 53 ++ proto/supernode/storage_challenge.proto | 60 ++ supernode/audit_reporter/service.go | 244 +++++++ supernode/cmd/start.go | 75 +- supernode/config.yml | 6 + supernode/config/config.go | 22 +- supernode/config/defaults.go | 19 +- supernode/config/save.go | 5 + supernode/storage_challenge/README.md | 51 ++ supernode/storage_challenge/service.go | 572 ++++++++++++++++ .../reachability_active_probing_test.go | 4 + .../grpc/storage_challenge/handler.go | 193 ++++++ 31 files changed, 2912 insertions(+), 438 deletions(-) create mode 100644 gen/supernode/storage_challenge.pb.go create mode 100644 gen/supernode/storage_challenge.swagger.json create mode 100644 gen/supernode/storage_challenge_grpc.pb.go create mode 100644 pkg/lumera/modules/audit/impl.go create mode 100644 pkg/lumera/modules/audit/interface.go create mode 100644 pkg/lumera/modules/audit_msg/audit_msg_mock.go create mode 100644 pkg/lumera/modules/audit_msg/impl.go create mode 100644 pkg/lumera/modules/audit_msg/interface.go create mode 100644 
pkg/storagechallenge/deterministic/deterministic.go create mode 100644 pkg/storagechallenge/deterministic/deterministic_test.go create mode 100644 proto/supernode/storage_challenge.proto create mode 100644 supernode/audit_reporter/service.go create mode 100644 supernode/storage_challenge/README.md create mode 100644 supernode/storage_challenge/service.go create mode 100644 supernode/transport/grpc/storage_challenge/handler.go diff --git a/Makefile b/Makefile index 788c308b..f26ddc27 100644 --- a/Makefile +++ b/Makefile @@ -151,7 +151,7 @@ gen-supernode: --grpc-gateway_out=gen \ --grpc-gateway_opt=paths=source_relative \ --openapiv2_out=gen \ - proto/supernode/service.proto proto/supernode/status.proto + proto/supernode/service.proto proto/supernode/status.proto proto/supernode/storage_challenge.proto # Define the paths SUPERNODE_SRC=supernode/main.go @@ -205,4 +205,3 @@ test-sn-manager: @echo "Running sn-manager e2e tests..." @cd tests/system && ${GO} test -tags=system_test -v -run '^TestSNManager' . - diff --git a/gen/supernode/service.pb.go b/gen/supernode/service.pb.go index ad1ff814..f74c97f0 100644 --- a/gen/supernode/service.pb.go +++ b/gen/supernode/service.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.9 // protoc v3.21.12 // source: supernode/service.proto diff --git a/gen/supernode/service.pb.gw.go b/gen/supernode/service.pb.gw.go index 93983b0f..89e6ca78 100644 --- a/gen/supernode/service.pb.gw.go +++ b/gen/supernode/service.pb.gw.go @@ -10,6 +10,7 @@ package supernode import ( "context" + "errors" "io" "net/http" @@ -24,478 +25,470 @@ import ( ) // Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = metadata.Join - var ( - filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join ) -func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata +var filter_SupernodeService_GetStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +func request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq StatusRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetStatus(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetStatus_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - + var ( + protoReq StatusRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetStatus_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetStatus(ctx, &protoReq) return msg, metadata, err - } func request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - + var ( + protoReq ListServicesRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } msg, err := client.ListServices(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_ListServices_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ListServicesRequest - var metadata runtime.ServerMetadata - + var ( + protoReq ListServicesRequest + metadata runtime.ServerMetadata + ) msg, err := server.ListServices(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: 
[]int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprof_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprof(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprof_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprof_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprof(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofHeap_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofHeap_0 = 
&utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofHeap_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofHeap_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofHeap(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofGoroutine_0 = &utilities.DoubleArray{Encoding: map[string]int{}, 
Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofGoroutine(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofGoroutine_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofGoroutine_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofGoroutine(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofAllocs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: 
[]int(nil)} func request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofAllocs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofAllocs_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofAllocs_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofAllocs(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofBlock_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func 
request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofBlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofBlock_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofBlock_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofBlock(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofMutex_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofMutex_0(ctx 
context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofMutex(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofMutex_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofMutex_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofMutex(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofThreadcreate_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler 
runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofThreadcreate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofThreadcreate_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofThreadcreate_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofThreadcreate(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofProfile_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler 
runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofCpuRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofCpuRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofProfile(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofProfile_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofCpuRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofCpuRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofProfile_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofProfile(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofCmdline_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, client 
SupernodeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofCmdline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofCmdline_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofCmdline_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofCmdline(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofSymbol_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req 
*http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofSymbol(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofSymbol_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofSymbol_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofSymbol(ctx, &protoReq) return msg, metadata, err - } -var ( - filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) +var filter_SupernodeService_GetRawPprofTrace_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} func request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, client SupernodeServiceClient, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetRawPprofTrace(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_SupernodeService_GetRawPprofTrace_0(ctx context.Context, marshaler runtime.Marshaler, server SupernodeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RawPprofRequest - var metadata runtime.ServerMetadata - + var ( + protoReq RawPprofRequest + metadata runtime.ServerMetadata + ) if err := req.ParseForm(); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_SupernodeService_GetRawPprofTrace_0); err != nil { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetRawPprofTrace(ctx, &protoReq) return msg, metadata, err - } // RegisterSupernodeServiceHandlerServer registers the http handlers for service SupernodeService to "mux". // UnaryRPC :call SupernodeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterSupernodeServiceHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. 
To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server SupernodeServiceServer) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -507,20 +500,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -532,20 +520,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -557,20 +540,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -582,20 +560,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -607,20 +580,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -632,20 +600,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -657,20 +620,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -682,20 +640,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -707,20 +660,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -732,20 +680,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -757,20 +700,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -782,20 +720,15 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -807,9 +740,7 @@ func RegisterSupernodeServiceHandlerServer(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - }) return nil @@ -836,7 +767,6 @@ func RegisterSupernodeServiceHandlerFromEndpoint(ctx context.Context, mux *runti } }() }() - return RegisterSupernodeServiceHandler(ctx, mux, conn) } @@ -850,16 +780,13 @@ func RegisterSupernodeServiceHandler(ctx context.Context, mux *runtime.ServeMux, // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "SupernodeServiceClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "SupernodeServiceClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in -// "SupernodeServiceClient" to call the correct interceptors. +// "SupernodeServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client SupernodeServiceClient) error { - - mux.Handle("GET", pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetStatus", runtime.WithHTTPPathPattern("/api/v1/status")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -870,18 +797,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_ListServices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/ListServices", runtime.WithHTTPPathPattern("/api/v1/services")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -892,18 +814,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_ListServices_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprof_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprof", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -914,18 +831,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprof_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofHeap", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/heap")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -936,18 +848,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofGoroutine_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofGoroutine", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/goroutine")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -958,18 +865,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofGoroutine_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofAllocs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofAllocs", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/allocs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -980,18 +882,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofAllocs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofBlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofBlock", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/block")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1002,18 +899,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofBlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofMutex_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofMutex", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/mutex")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1024,18 +916,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofMutex_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofThreadcreate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofThreadcreate", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/threadcreate")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1046,18 +933,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofThreadcreate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofProfile_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofProfile", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/profile")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1068,18 +950,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofProfile_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofCmdline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofCmdline", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/cmdline")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1090,18 +967,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofCmdline_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofSymbol_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofSymbol", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/symbol")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1112,18 +984,13 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofSymbol_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - - mux.Handle("GET", pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_SupernodeService_GetRawPprofTrace_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - var err error - var annotatedContext context.Context - annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/supernode.SupernodeService/GetRawPprofTrace", runtime.WithHTTPPathPattern("/api/v1/debug/raw/pprof/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -1134,66 +1001,39 @@ func RegisterSupernodeServiceHandlerClient(ctx context.Context, mux *runtime.Ser runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - forward_SupernodeService_GetRawPprofTrace_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) - return nil } var ( - pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) - - pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) - - pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) - - pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) - - pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) - - pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) - - pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) - - pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) - + pattern_SupernodeService_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "status"}, "")) + pattern_SupernodeService_ListServices_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "v1", "services"}, "")) + pattern_SupernodeService_GetRawPprof_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"api", "v1", "debug", "raw", "pprof"}, "")) + 
pattern_SupernodeService_GetRawPprofHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "heap"}, "")) + pattern_SupernodeService_GetRawPprofGoroutine_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "goroutine"}, "")) + pattern_SupernodeService_GetRawPprofAllocs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "allocs"}, "")) + pattern_SupernodeService_GetRawPprofBlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "block"}, "")) + pattern_SupernodeService_GetRawPprofMutex_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "mutex"}, "")) pattern_SupernodeService_GetRawPprofThreadcreate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "threadcreate"}, "")) - - pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) - - pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) - - pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) - - pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) + 
pattern_SupernodeService_GetRawPprofProfile_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "profile"}, "")) + pattern_SupernodeService_GetRawPprofCmdline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "cmdline"}, "")) + pattern_SupernodeService_GetRawPprofSymbol_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "symbol"}, "")) + pattern_SupernodeService_GetRawPprofTrace_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"api", "v1", "debug", "raw", "pprof", "trace"}, "")) ) var ( - forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage - + forward_SupernodeService_GetStatus_0 = runtime.ForwardResponseMessage + forward_SupernodeService_ListServices_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprof_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofHeap_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofGoroutine_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofAllocs_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofBlock_0 = runtime.ForwardResponseMessage + 
forward_SupernodeService_GetRawPprofMutex_0 = runtime.ForwardResponseMessage forward_SupernodeService_GetRawPprofThreadcreate_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage - - forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofProfile_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofCmdline_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofSymbol_0 = runtime.ForwardResponseMessage + forward_SupernodeService_GetRawPprofTrace_0 = runtime.ForwardResponseMessage ) diff --git a/gen/supernode/status.pb.go b/gen/supernode/status.pb.go index 74e0d6d7..8b6a75d3 100644 --- a/gen/supernode/status.pb.go +++ b/gen/supernode/status.pb.go @@ -1,18 +1,17 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.6 +// protoc-gen-go v1.36.9 // protoc v3.21.12 // source: supernode/status.proto package supernode import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" unsafe "unsafe" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( diff --git a/gen/supernode/storage_challenge.pb.go b/gen/supernode/storage_challenge.pb.go new file mode 100644 index 00000000..e62e8be7 --- /dev/null +++ b/gen/supernode/storage_challenge.pb.go @@ -0,0 +1,533 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.36.9 +// protoc v3.21.12 +// source: supernode/storage_challenge.proto + +package supernode + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetSliceProofRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + Seed []byte `protobuf:"bytes,3,opt,name=seed,proto3" json:"seed,omitempty"` + FileKey string `protobuf:"bytes,4,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + RequestedStart uint64 `protobuf:"varint,5,opt,name=requested_start,json=requestedStart,proto3" json:"requested_start,omitempty"` + RequestedEnd uint64 `protobuf:"varint,6,opt,name=requested_end,json=requestedEnd,proto3" json:"requested_end,omitempty"` + ChallengerId string `protobuf:"bytes,7,opt,name=challenger_id,json=challengerId,proto3" json:"challenger_id,omitempty"` + RecipientId string `protobuf:"bytes,8,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` + ObserverIds []string `protobuf:"bytes,9,rep,name=observer_ids,json=observerIds,proto3" json:"observer_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetSliceProofRequest) Reset() { + *x = GetSliceProofRequest{} + mi := &file_supernode_storage_challenge_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*GetSliceProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSliceProofRequest) ProtoMessage() {} + +func (x *GetSliceProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSliceProofRequest.ProtoReflect.Descriptor instead. +func (*GetSliceProofRequest) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{0} +} + +func (x *GetSliceProofRequest) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *GetSliceProofRequest) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *GetSliceProofRequest) GetSeed() []byte { + if x != nil { + return x.Seed + } + return nil +} + +func (x *GetSliceProofRequest) GetFileKey() string { + if x != nil { + return x.FileKey + } + return "" +} + +func (x *GetSliceProofRequest) GetRequestedStart() uint64 { + if x != nil { + return x.RequestedStart + } + return 0 +} + +func (x *GetSliceProofRequest) GetRequestedEnd() uint64 { + if x != nil { + return x.RequestedEnd + } + return 0 +} + +func (x *GetSliceProofRequest) GetChallengerId() string { + if x != nil { + return x.ChallengerId + } + return "" +} + +func (x *GetSliceProofRequest) GetRecipientId() string { + if x != nil { + return x.RecipientId + } + return "" +} + +func (x *GetSliceProofRequest) GetObserverIds() []string { + if x != nil { + return x.ObserverIds + } + return nil +} + +type GetSliceProofResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" 
json:"epoch_id,omitempty"` + FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + RecipientId string `protobuf:"bytes,6,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` + Slice []byte `protobuf:"bytes,7,opt,name=slice,proto3" json:"slice,omitempty"` + ProofHashHex string `protobuf:"bytes,8,opt,name=proof_hash_hex,json=proofHashHex,proto3" json:"proof_hash_hex,omitempty"` + Ok bool `protobuf:"varint,9,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,10,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetSliceProofResponse) Reset() { + *x = GetSliceProofResponse{} + mi := &file_supernode_storage_challenge_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetSliceProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSliceProofResponse) ProtoMessage() {} + +func (x *GetSliceProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSliceProofResponse.ProtoReflect.Descriptor instead. 
+func (*GetSliceProofResponse) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{1} +} + +func (x *GetSliceProofResponse) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *GetSliceProofResponse) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *GetSliceProofResponse) GetFileKey() string { + if x != nil { + return x.FileKey + } + return "" +} + +func (x *GetSliceProofResponse) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *GetSliceProofResponse) GetEnd() uint64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *GetSliceProofResponse) GetRecipientId() string { + if x != nil { + return x.RecipientId + } + return "" +} + +func (x *GetSliceProofResponse) GetSlice() []byte { + if x != nil { + return x.Slice + } + return nil +} + +func (x *GetSliceProofResponse) GetProofHashHex() string { + if x != nil { + return x.ProofHashHex + } + return "" +} + +func (x *GetSliceProofResponse) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *GetSliceProofResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type VerifySliceProofRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + FileKey string `protobuf:"bytes,3,opt,name=file_key,json=fileKey,proto3" json:"file_key,omitempty"` + Start uint64 `protobuf:"varint,4,opt,name=start,proto3" json:"start,omitempty"` + End uint64 `protobuf:"varint,5,opt,name=end,proto3" json:"end,omitempty"` + Slice []byte `protobuf:"bytes,6,opt,name=slice,proto3" json:"slice,omitempty"` + ProofHashHex string `protobuf:"bytes,7,opt,name=proof_hash_hex,json=proofHashHex,proto3" 
json:"proof_hash_hex,omitempty"` + ChallengerId string `protobuf:"bytes,8,opt,name=challenger_id,json=challengerId,proto3" json:"challenger_id,omitempty"` + RecipientId string `protobuf:"bytes,9,opt,name=recipient_id,json=recipientId,proto3" json:"recipient_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifySliceProofRequest) Reset() { + *x = VerifySliceProofRequest{} + mi := &file_supernode_storage_challenge_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifySliceProofRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifySliceProofRequest) ProtoMessage() {} + +func (x *VerifySliceProofRequest) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifySliceProofRequest.ProtoReflect.Descriptor instead. 
+func (*VerifySliceProofRequest) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{2} +} + +func (x *VerifySliceProofRequest) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *VerifySliceProofRequest) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *VerifySliceProofRequest) GetFileKey() string { + if x != nil { + return x.FileKey + } + return "" +} + +func (x *VerifySliceProofRequest) GetStart() uint64 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *VerifySliceProofRequest) GetEnd() uint64 { + if x != nil { + return x.End + } + return 0 +} + +func (x *VerifySliceProofRequest) GetSlice() []byte { + if x != nil { + return x.Slice + } + return nil +} + +func (x *VerifySliceProofRequest) GetProofHashHex() string { + if x != nil { + return x.ProofHashHex + } + return "" +} + +func (x *VerifySliceProofRequest) GetChallengerId() string { + if x != nil { + return x.ChallengerId + } + return "" +} + +func (x *VerifySliceProofRequest) GetRecipientId() string { + if x != nil { + return x.RecipientId + } + return "" +} + +type VerifySliceProofResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ChallengeId string `protobuf:"bytes,1,opt,name=challenge_id,json=challengeId,proto3" json:"challenge_id,omitempty"` + EpochId uint64 `protobuf:"varint,2,opt,name=epoch_id,json=epochId,proto3" json:"epoch_id,omitempty"` + ObserverId string `protobuf:"bytes,3,opt,name=observer_id,json=observerId,proto3" json:"observer_id,omitempty"` + Ok bool `protobuf:"varint,4,opt,name=ok,proto3" json:"ok,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifySliceProofResponse) Reset() { + *x = VerifySliceProofResponse{} + mi := &file_supernode_storage_challenge_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifySliceProofResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifySliceProofResponse) ProtoMessage() {} + +func (x *VerifySliceProofResponse) ProtoReflect() protoreflect.Message { + mi := &file_supernode_storage_challenge_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifySliceProofResponse.ProtoReflect.Descriptor instead. +func (*VerifySliceProofResponse) Descriptor() ([]byte, []int) { + return file_supernode_storage_challenge_proto_rawDescGZIP(), []int{3} +} + +func (x *VerifySliceProofResponse) GetChallengeId() string { + if x != nil { + return x.ChallengeId + } + return "" +} + +func (x *VerifySliceProofResponse) GetEpochId() uint64 { + if x != nil { + return x.EpochId + } + return 0 +} + +func (x *VerifySliceProofResponse) GetObserverId() string { + if x != nil { + return x.ObserverId + } + return "" +} + +func (x *VerifySliceProofResponse) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *VerifySliceProofResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +var File_supernode_storage_challenge_proto protoreflect.FileDescriptor + +const file_supernode_storage_challenge_proto_rawDesc = "" + + "\n" + + "!supernode/storage_challenge.proto\x12\tsupernode\"\xbc\x02\n" + + "\x14GetSliceProofRequest\x12!\n" + + "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + + "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x12\n" + + "\x04seed\x18\x03 \x01(\fR\x04seed\x12\x19\n" + + "\bfile_key\x18\x04 \x01(\tR\afileKey\x12'\n" + + "\x0frequested_start\x18\x05 \x01(\x04R\x0erequestedStart\x12#\n" + + "\rrequested_end\x18\x06 \x01(\x04R\frequestedEnd\x12#\n" + + "\rchallenger_id\x18\a \x01(\tR\fchallengerId\x12!\n" + + 
"\frecipient_id\x18\b \x01(\tR\vrecipientId\x12!\n" + + "\fobserver_ids\x18\t \x03(\tR\vobserverIds\"\x9d\x02\n" + + "\x15GetSliceProofResponse\x12!\n" + + "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + + "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x19\n" + + "\bfile_key\x18\x03 \x01(\tR\afileKey\x12\x14\n" + + "\x05start\x18\x04 \x01(\x04R\x05start\x12\x10\n" + + "\x03end\x18\x05 \x01(\x04R\x03end\x12!\n" + + "\frecipient_id\x18\x06 \x01(\tR\vrecipientId\x12\x14\n" + + "\x05slice\x18\a \x01(\fR\x05slice\x12$\n" + + "\x0eproof_hash_hex\x18\b \x01(\tR\fproofHashHex\x12\x0e\n" + + "\x02ok\x18\t \x01(\bR\x02ok\x12\x14\n" + + "\x05error\x18\n" + + " \x01(\tR\x05error\"\x9e\x02\n" + + "\x17VerifySliceProofRequest\x12!\n" + + "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + + "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x19\n" + + "\bfile_key\x18\x03 \x01(\tR\afileKey\x12\x14\n" + + "\x05start\x18\x04 \x01(\x04R\x05start\x12\x10\n" + + "\x03end\x18\x05 \x01(\x04R\x03end\x12\x14\n" + + "\x05slice\x18\x06 \x01(\fR\x05slice\x12$\n" + + "\x0eproof_hash_hex\x18\a \x01(\tR\fproofHashHex\x12#\n" + + "\rchallenger_id\x18\b \x01(\tR\fchallengerId\x12!\n" + + "\frecipient_id\x18\t \x01(\tR\vrecipientId\"\x9f\x01\n" + + "\x18VerifySliceProofResponse\x12!\n" + + "\fchallenge_id\x18\x01 \x01(\tR\vchallengeId\x12\x19\n" + + "\bepoch_id\x18\x02 \x01(\x04R\aepochId\x12\x1f\n" + + "\vobserver_id\x18\x03 \x01(\tR\n" + + "observerId\x12\x0e\n" + + "\x02ok\x18\x04 \x01(\bR\x02ok\x12\x14\n" + + "\x05error\x18\x05 \x01(\tR\x05error2\xce\x01\n" + + "\x17StorageChallengeService\x12T\n" + + "\rGetSliceProof\x12\x1f.supernode.GetSliceProofRequest\x1a .supernode.GetSliceProofResponse\"\x00\x12]\n" + + "\x10VerifySliceProof\x12\".supernode.VerifySliceProofRequest\x1a#.supernode.VerifySliceProofResponse\"\x00B6Z4github.com/LumeraProtocol/supernode/v2/gen/supernodeb\x06proto3" + +var ( + file_supernode_storage_challenge_proto_rawDescOnce sync.Once + 
file_supernode_storage_challenge_proto_rawDescData []byte +) + +func file_supernode_storage_challenge_proto_rawDescGZIP() []byte { + file_supernode_storage_challenge_proto_rawDescOnce.Do(func() { + file_supernode_storage_challenge_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_supernode_storage_challenge_proto_rawDesc), len(file_supernode_storage_challenge_proto_rawDesc))) + }) + return file_supernode_storage_challenge_proto_rawDescData +} + +var file_supernode_storage_challenge_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_supernode_storage_challenge_proto_goTypes = []any{ + (*GetSliceProofRequest)(nil), // 0: supernode.GetSliceProofRequest + (*GetSliceProofResponse)(nil), // 1: supernode.GetSliceProofResponse + (*VerifySliceProofRequest)(nil), // 2: supernode.VerifySliceProofRequest + (*VerifySliceProofResponse)(nil), // 3: supernode.VerifySliceProofResponse +} +var file_supernode_storage_challenge_proto_depIdxs = []int32{ + 0, // 0: supernode.StorageChallengeService.GetSliceProof:input_type -> supernode.GetSliceProofRequest + 2, // 1: supernode.StorageChallengeService.VerifySliceProof:input_type -> supernode.VerifySliceProofRequest + 1, // 2: supernode.StorageChallengeService.GetSliceProof:output_type -> supernode.GetSliceProofResponse + 3, // 3: supernode.StorageChallengeService.VerifySliceProof:output_type -> supernode.VerifySliceProofResponse + 2, // [2:4] is the sub-list for method output_type + 0, // [0:2] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_supernode_storage_challenge_proto_init() } +func file_supernode_storage_challenge_proto_init() { + if File_supernode_storage_challenge_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: unsafe.Slice(unsafe.StringData(file_supernode_storage_challenge_proto_rawDesc), len(file_supernode_storage_challenge_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_supernode_storage_challenge_proto_goTypes, + DependencyIndexes: file_supernode_storage_challenge_proto_depIdxs, + MessageInfos: file_supernode_storage_challenge_proto_msgTypes, + }.Build() + File_supernode_storage_challenge_proto = out.File + file_supernode_storage_challenge_proto_goTypes = nil + file_supernode_storage_challenge_proto_depIdxs = nil +} diff --git a/gen/supernode/storage_challenge.swagger.json b/gen/supernode/storage_challenge.swagger.json new file mode 100644 index 00000000..9304b937 --- /dev/null +++ b/gen/supernode/storage_challenge.swagger.json @@ -0,0 +1,109 @@ +{ + "swagger": "2.0", + "info": { + "title": "supernode/storage_challenge.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "StorageChallengeService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string" + } + }, + "additionalProperties": {} + }, + "rpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + } + } + } + }, + "supernodeGetSliceProofResponse": { + "type": "object", + "properties": { + "challengeId": { + "type": "string" + }, + "epochId": { + "type": "string", + "format": "uint64" + }, + "fileKey": { + "type": "string" + }, + "start": { + "type": "string", + "format": "uint64" + }, + "end": { + "type": "string", + "format": "uint64" + }, + "recipientId": { + "type": "string" + }, + "slice": { + "type": "string", + "format": "byte" + }, + "proofHashHex": { + "type": 
"string" + }, + "ok": { + "type": "boolean" + }, + "error": { + "type": "string" + } + } + }, + "supernodeVerifySliceProofResponse": { + "type": "object", + "properties": { + "challengeId": { + "type": "string" + }, + "epochId": { + "type": "string", + "format": "uint64" + }, + "observerId": { + "type": "string" + }, + "ok": { + "type": "boolean" + }, + "error": { + "type": "string" + } + } + } + } +} diff --git a/gen/supernode/storage_challenge_grpc.pb.go b/gen/supernode/storage_challenge_grpc.pb.go new file mode 100644 index 00000000..0844b73d --- /dev/null +++ b/gen/supernode/storage_challenge_grpc.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v3.21.12 +// source: supernode/storage_challenge.proto + +package supernode + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + StorageChallengeService_GetSliceProof_FullMethodName = "/supernode.StorageChallengeService/GetSliceProof" + StorageChallengeService_VerifySliceProof_FullMethodName = "/supernode.StorageChallengeService/VerifySliceProof" +) + +// StorageChallengeServiceClient is the client API for StorageChallengeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// StorageChallengeService exposes minimal RPCs required to run epoch-anchored storage challenges off-chain. 
+type StorageChallengeServiceClient interface { + GetSliceProof(ctx context.Context, in *GetSliceProofRequest, opts ...grpc.CallOption) (*GetSliceProofResponse, error) + VerifySliceProof(ctx context.Context, in *VerifySliceProofRequest, opts ...grpc.CallOption) (*VerifySliceProofResponse, error) +} + +type storageChallengeServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewStorageChallengeServiceClient(cc grpc.ClientConnInterface) StorageChallengeServiceClient { + return &storageChallengeServiceClient{cc} +} + +func (c *storageChallengeServiceClient) GetSliceProof(ctx context.Context, in *GetSliceProofRequest, opts ...grpc.CallOption) (*GetSliceProofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetSliceProofResponse) + err := c.cc.Invoke(ctx, StorageChallengeService_GetSliceProof_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storageChallengeServiceClient) VerifySliceProof(ctx context.Context, in *VerifySliceProofRequest, opts ...grpc.CallOption) (*VerifySliceProofResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(VerifySliceProofResponse) + err := c.cc.Invoke(ctx, StorageChallengeService_VerifySliceProof_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StorageChallengeServiceServer is the server API for StorageChallengeService service. +// All implementations must embed UnimplementedStorageChallengeServiceServer +// for forward compatibility. +// +// StorageChallengeService exposes minimal RPCs required to run epoch-anchored storage challenges off-chain. 
+type StorageChallengeServiceServer interface {
+	GetSliceProof(context.Context, *GetSliceProofRequest) (*GetSliceProofResponse, error)
+	VerifySliceProof(context.Context, *VerifySliceProofRequest) (*VerifySliceProofResponse, error)
+	mustEmbedUnimplementedStorageChallengeServiceServer()
+}
+
+// UnimplementedStorageChallengeServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedStorageChallengeServiceServer struct{}
+
+func (UnimplementedStorageChallengeServiceServer) GetSliceProof(context.Context, *GetSliceProofRequest) (*GetSliceProofResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetSliceProof not implemented")
+}
+func (UnimplementedStorageChallengeServiceServer) VerifySliceProof(context.Context, *VerifySliceProofRequest) (*VerifySliceProofResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method VerifySliceProof not implemented")
+}
+func (UnimplementedStorageChallengeServiceServer) mustEmbedUnimplementedStorageChallengeServiceServer() {
+}
+func (UnimplementedStorageChallengeServiceServer) testEmbeddedByValue() {}
+
+// UnsafeStorageChallengeServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to StorageChallengeServiceServer will
+// result in compilation errors.
+type UnsafeStorageChallengeServiceServer interface {
+	mustEmbedUnimplementedStorageChallengeServiceServer()
+}
+
+func RegisterStorageChallengeServiceServer(s grpc.ServiceRegistrar, srv StorageChallengeServiceServer) {
+	// If the following call panics, it indicates UnimplementedStorageChallengeServiceServer was
+	// embedded by pointer and is nil.
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&StorageChallengeService_ServiceDesc, srv) +} + +func _StorageChallengeService_GetSliceProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSliceProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageChallengeServiceServer).GetSliceProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageChallengeService_GetSliceProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageChallengeServiceServer).GetSliceProof(ctx, req.(*GetSliceProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _StorageChallengeService_VerifySliceProof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifySliceProofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageChallengeServiceServer).VerifySliceProof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: StorageChallengeService_VerifySliceProof_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageChallengeServiceServer).VerifySliceProof(ctx, req.(*VerifySliceProofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// StorageChallengeService_ServiceDesc is the grpc.ServiceDesc for StorageChallengeService service. 
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var StorageChallengeService_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "supernode.StorageChallengeService",
+	HandlerType: (*StorageChallengeServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetSliceProof",
+			Handler:    _StorageChallengeService_GetSliceProof_Handler,
+		},
+		{
+			MethodName: "VerifySliceProof",
+			Handler:    _StorageChallengeService_VerifySliceProof_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "supernode/storage_challenge.proto",
+}
diff --git a/go.mod b/go.mod
index a053bda7..c2ad879c 100644
--- a/go.mod
+++ b/go.mod
@@ -6,9 +6,8 @@ replace (
 	github.com/envoyproxy/protoc-gen-validate => github.com/bufbuild/protoc-gen-validate v1.3.0
 	github.com/lyft/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.3.0
 	nhooyr.io/websocket => github.com/coder/websocket v1.8.7
-// Local development - uncomment these for local testing
-// Comment these lines before releasing
-//github.com/LumeraProtocol/lumera => ../lumera
+	// Local development (monorepo) only: uses a sibling ../lumera checkout. Comment this line out before releasing — an active local-path replace breaks builds for all other consumers.
+	github.com/LumeraProtocol/lumera => ../lumera
 )
 
 require (
diff --git a/pkg/lumera/Readme.md b/pkg/lumera/Readme.md
index b25d43f3..a636ccda 100644
--- a/pkg/lumera/Readme.md
+++ b/pkg/lumera/Readme.md
@@ -5,6 +5,7 @@ A minimal guide to the Lumera client
 What it is
 - Lightweight client over gRPC with small modules: `Auth`, `Action`, `ActionMsg`, `SuperNode`, `Tx`, `Node`.
+- Also includes `Audit` (queries) and `AuditMsg` (tx submission) for `x/audit`.
 - Shared tx pipeline for building, simulating, signing, and broadcasting messages.
Create a client @@ -24,6 +25,8 @@ Using modules - `cli.Action()` – query actions (GetAction, GetActionFee, GetParams) - `cli.ActionMsg()` – send action messages (see below) +- `cli.Audit()` – query `x/audit` (params/epochs/anchors/assignments/reports) +- `cli.AuditMsg()` – submit `x/audit` txs (`MsgSubmitAuditReport`, `MsgSubmitEvidence`) - `cli.Auth()` – accounts/verify - `cli.SuperNode()` – supernode queries - `cli.Tx()` – tx internals (shared by helpers) diff --git a/pkg/lumera/client.go b/pkg/lumera/client.go index bcd93a71..be5cd9bf 100644 --- a/pkg/lumera/client.go +++ b/pkg/lumera/client.go @@ -6,6 +6,8 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" @@ -19,6 +21,8 @@ type lumeraClient struct { authMod auth.Module actionMod action.Module actionMsgMod action_msg.Module + auditMod audit.Module + auditMsgMod audit_msg.Module bankMod bank.Module supernodeMod supernode.Module supernodeMsgMod supernode_msg.Module @@ -52,6 +56,12 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) { return nil, err } + auditModule, err := audit.NewModule(conn.GetConn()) + if err != nil { + conn.Close() + return nil, err + } + supernodeModule, err := supernode.NewModule(conn.GetConn()) if err != nil { conn.Close() @@ -109,11 +119,26 @@ func newClient(ctx context.Context, cfg *Config) (Client, error) { return nil, err } + auditMsgModule, err := audit_msg.NewModule( + conn.GetConn(), + authModule, + txModule, + cfg.keyring, + cfg.KeyName, + cfg.ChainID, + ) + if err != nil { + conn.Close() + return nil, err + } + return &lumeraClient{ cfg: 
cfg, authMod: authModule, actionMod: actionModule, actionMsgMod: actionMsgModule, + auditMod: auditModule, + auditMsgMod: auditMsgModule, bankMod: bankModule, supernodeMod: supernodeModule, supernodeMsgMod: supernodeMsgModule, @@ -135,6 +160,14 @@ func (c *lumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +func (c *lumeraClient) Audit() audit.Module { + return c.auditMod +} + +func (c *lumeraClient) AuditMsg() audit_msg.Module { + return c.auditMsgMod +} + func (c *lumeraClient) Bank() bank.Module { return c.bankMod } diff --git a/pkg/lumera/interface.go b/pkg/lumera/interface.go index 3cad6635..f660e9c2 100644 --- a/pkg/lumera/interface.go +++ b/pkg/lumera/interface.go @@ -6,6 +6,8 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" @@ -19,6 +21,8 @@ type Client interface { Auth() auth.Module Action() action.Module ActionMsg() action_msg.Module + Audit() audit.Module + AuditMsg() audit_msg.Module SuperNode() supernode.Module SuperNodeMsg() supernode_msg.Module Bank() bank.Module diff --git a/pkg/lumera/lumera_mock.go b/pkg/lumera/lumera_mock.go index b270e6ac..95efcfbf 100644 --- a/pkg/lumera/lumera_mock.go +++ b/pkg/lumera/lumera_mock.go @@ -14,6 +14,8 @@ import ( action "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" action_msg "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" + audit "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + audit_msg "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit_msg" auth 
"github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" bank "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" node "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" @@ -75,6 +77,34 @@ func (mr *MockClientMockRecorder) ActionMsg() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ActionMsg", reflect.TypeOf((*MockClient)(nil).ActionMsg)) } +// Audit mocks base method. +func (m *MockClient) Audit() audit.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Audit") + ret0, _ := ret[0].(audit.Module) + return ret0 +} + +// Audit indicates an expected call of Audit. +func (mr *MockClientMockRecorder) Audit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Audit", reflect.TypeOf((*MockClient)(nil).Audit)) +} + +// AuditMsg mocks base method. +func (m *MockClient) AuditMsg() audit_msg.Module { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuditMsg") + ret0, _ := ret[0].(audit_msg.Module) + return ret0 +} + +// AuditMsg indicates an expected call of AuditMsg. +func (mr *MockClientMockRecorder) AuditMsg() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuditMsg", reflect.TypeOf((*MockClient)(nil).AuditMsg)) +} + // Auth mocks base method. 
func (m *MockClient) Auth() auth.Module { m.ctrl.T.Helper() diff --git a/pkg/lumera/modules/audit/impl.go b/pkg/lumera/modules/audit/impl.go new file mode 100644 index 00000000..7ed1383f --- /dev/null +++ b/pkg/lumera/modules/audit/impl.go @@ -0,0 +1,75 @@ +package audit + +import ( + "context" + "fmt" + + "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "google.golang.org/grpc" +) + +type module struct { + client types.QueryClient +} + +func newModule(conn *grpc.ClientConn) (Module, error) { + if conn == nil { + return nil, fmt.Errorf("connection cannot be nil") + } + return &module{client: types.NewQueryClient(conn)}, nil +} + +func (m *module) GetParams(ctx context.Context) (*types.QueryParamsResponse, error) { + resp, err := m.client.Params(ctx, &types.QueryParamsRequest{}) + if err != nil { + return nil, fmt.Errorf("failed to get audit params: %w", err) + } + return resp, nil +} + +func (m *module) GetEpochAnchor(ctx context.Context, epochID uint64) (*types.QueryEpochAnchorResponse, error) { + resp, err := m.client.EpochAnchor(ctx, &types.QueryEpochAnchorRequest{EpochId: epochID}) + if err != nil { + return nil, fmt.Errorf("failed to get epoch anchor: %w", err) + } + return resp, nil +} + +func (m *module) GetCurrentEpochAnchor(ctx context.Context) (*types.QueryCurrentEpochAnchorResponse, error) { + resp, err := m.client.CurrentEpochAnchor(ctx, &types.QueryCurrentEpochAnchorRequest{}) + if err != nil { + return nil, fmt.Errorf("failed to get current epoch anchor: %w", err) + } + return resp, nil +} + +func (m *module) GetCurrentEpoch(ctx context.Context) (*types.QueryCurrentEpochResponse, error) { + resp, err := m.client.CurrentEpoch(ctx, &types.QueryCurrentEpochRequest{}) + if err != nil { + return nil, fmt.Errorf("failed to get current epoch: %w", err) + } + return resp, nil +} + +func (m *module) GetAssignedTargets(ctx context.Context, supernodeAccount string, epochID uint64) (*types.QueryAssignedTargetsResponse, error) { + resp, err := 
m.client.AssignedTargets(ctx, &types.QueryAssignedTargetsRequest{ + SupernodeAccount: supernodeAccount, + EpochId: epochID, + FilterByEpochId: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to get assigned targets: %w", err) + } + return resp, nil +} + +func (m *module) GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryAuditReportResponse, error) { + resp, err := m.client.AuditReport(ctx, &types.QueryAuditReportRequest{ + EpochId: epochID, + SupernodeAccount: supernodeAccount, + }) + if err != nil { + return nil, fmt.Errorf("failed to get audit report: %w", err) + } + return resp, nil +} diff --git a/pkg/lumera/modules/audit/interface.go b/pkg/lumera/modules/audit/interface.go new file mode 100644 index 00000000..9d9eb60d --- /dev/null +++ b/pkg/lumera/modules/audit/interface.go @@ -0,0 +1,23 @@ +package audit + +import ( + "context" + + "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "google.golang.org/grpc" +) + +// Module defines the interface for querying the audit module. +type Module interface { + GetParams(ctx context.Context) (*types.QueryParamsResponse, error) + GetEpochAnchor(ctx context.Context, epochID uint64) (*types.QueryEpochAnchorResponse, error) + GetCurrentEpochAnchor(ctx context.Context) (*types.QueryCurrentEpochAnchorResponse, error) + GetCurrentEpoch(ctx context.Context) (*types.QueryCurrentEpochResponse, error) + GetAssignedTargets(ctx context.Context, supernodeAccount string, epochID uint64) (*types.QueryAssignedTargetsResponse, error) + GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryAuditReportResponse, error) +} + +// NewModule creates a new Audit module client. 
+func NewModule(conn *grpc.ClientConn) (Module, error) { + return newModule(conn) +} diff --git a/pkg/lumera/modules/audit_msg/audit_msg_mock.go b/pkg/lumera/modules/audit_msg/audit_msg_mock.go new file mode 100644 index 00000000..7ce8be97 --- /dev/null +++ b/pkg/lumera/modules/audit_msg/audit_msg_mock.go @@ -0,0 +1,73 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go +// +// Generated by this command: +// +// mockgen -destination=audit_msg_mock.go -package=audit_msg -source=interface.go +// + +// Package audit_msg is a generated GoMock package. +package audit_msg + +import ( + context "context" + reflect "reflect" + + types "github.com/LumeraProtocol/lumera/x/audit/v1/types" + tx "github.com/cosmos/cosmos-sdk/types/tx" + gomock "go.uber.org/mock/gomock" +) + +// MockModule is a mock of Module interface. +type MockModule struct { + ctrl *gomock.Controller + recorder *MockModuleMockRecorder + isgomock struct{} +} + +// MockModuleMockRecorder is the mock recorder for MockModule. +type MockModuleMockRecorder struct { + mock *MockModule +} + +// NewMockModule creates a new mock instance. +func NewMockModule(ctrl *gomock.Controller) *MockModule { + mock := &MockModule{ctrl: ctrl} + mock.recorder = &MockModuleMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockModule) EXPECT() *MockModuleMockRecorder { + return m.recorder +} + +// SubmitEvidence mocks base method. +func (m *MockModule) SubmitEvidence(ctx context.Context, subjectAddress string, evidenceType types.EvidenceType, actionID, metadataJSON string) (*tx.BroadcastTxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitEvidence", ctx, subjectAddress, evidenceType, actionID, metadataJSON) + ret0, _ := ret[0].(*tx.BroadcastTxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitEvidence indicates an expected call of SubmitEvidence. 
+func (mr *MockModuleMockRecorder) SubmitEvidence(ctx, subjectAddress, evidenceType, actionID, metadataJSON any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitEvidence", reflect.TypeOf((*MockModule)(nil).SubmitEvidence), ctx, subjectAddress, evidenceType, actionID, metadataJSON) +} + +// SubmitAuditReport mocks base method. +func (m *MockModule) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*types.AuditPeerObservation) (*tx.BroadcastTxResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubmitAuditReport", ctx, epochID, peerObservations) + ret0, _ := ret[0].(*tx.BroadcastTxResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SubmitAuditReport indicates an expected call of SubmitAuditReport. +func (mr *MockModuleMockRecorder) SubmitAuditReport(ctx, epochID, peerObservations any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAuditReport", reflect.TypeOf((*MockModule)(nil).SubmitAuditReport), ctx, epochID, peerObservations) +} diff --git a/pkg/lumera/modules/audit_msg/impl.go b/pkg/lumera/modules/audit_msg/impl.go new file mode 100644 index 00000000..bf0fae46 --- /dev/null +++ b/pkg/lumera/modules/audit_msg/impl.go @@ -0,0 +1,101 @@ +package audit_msg + +import ( + "context" + "fmt" + "strings" + "sync" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdktypes "github.com/cosmos/cosmos-sdk/types" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" + "google.golang.org/grpc" +) + +type module struct { + client audittypes.MsgClient + txHelper *txmod.TxHelper + mu sync.Mutex +} + +func newModule( + conn *grpc.ClientConn, + authmodule auth.Module, + txmodule txmod.Module, + kr keyring.Keyring, + keyName 
string, + chainID string, +) (Module, error) { + if conn == nil { + return nil, fmt.Errorf("connection cannot be nil") + } + if authmodule == nil { + return nil, fmt.Errorf("auth module cannot be nil") + } + if txmodule == nil { + return nil, fmt.Errorf("tx module cannot be nil") + } + if kr == nil { + return nil, fmt.Errorf("keyring cannot be nil") + } + if strings.TrimSpace(keyName) == "" { + return nil, fmt.Errorf("key name cannot be empty") + } + if strings.TrimSpace(chainID) == "" { + return nil, fmt.Errorf("chain ID cannot be empty") + } + + return &module{ + client: audittypes.NewMsgClient(conn), + txHelper: txmod.NewTxHelperWithDefaults(authmodule, txmodule, chainID, keyName, kr), + }, nil +} + +func (m *module) SubmitEvidence(ctx context.Context, subjectAddress string, evidenceType audittypes.EvidenceType, actionID string, metadataJSON string) (*sdktx.BroadcastTxResponse, error) { + subjectAddress = strings.TrimSpace(subjectAddress) + if subjectAddress == "" { + return nil, fmt.Errorf("subject address cannot be empty") + } + metadataJSON = strings.TrimSpace(metadataJSON) + if metadataJSON == "" { + return nil, fmt.Errorf("metadata cannot be empty") + } + + m.mu.Lock() + defer m.mu.Unlock() + + return m.txHelper.ExecuteTransaction(ctx, func(creator string) (sdktypes.Msg, error) { + return &audittypes.MsgSubmitEvidence{ + Creator: creator, + SubjectAddress: subjectAddress, + EvidenceType: evidenceType, + ActionId: actionID, + Metadata: metadataJSON, + }, nil + }) +} + +func (m *module) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) { + m.mu.Lock() + defer m.mu.Unlock() + + // Intentionally submit 0% usage for CPU/memory/disk so the chain treats these as "unknown" + // (see x/audit enforcement semantics). 
+ selfReport := audittypes.AuditSelfReport{ + CpuUsagePercent: 0, + MemUsagePercent: 0, + DiskUsagePercent: 0, + } + + return m.txHelper.ExecuteTransaction(ctx, func(creator string) (sdktypes.Msg, error) { + return &audittypes.MsgSubmitAuditReport{ + SupernodeAccount: creator, + EpochId: epochID, + SelfReport: selfReport, + PeerObservations: peerObservations, + }, nil + }) +} diff --git a/pkg/lumera/modules/audit_msg/interface.go b/pkg/lumera/modules/audit_msg/interface.go new file mode 100644 index 00000000..d8fe6a17 --- /dev/null +++ b/pkg/lumera/modules/audit_msg/interface.go @@ -0,0 +1,31 @@ +//go:generate go run go.uber.org/mock/mockgen -destination=audit_msg_mock.go -package=audit_msg -source=interface.go +package audit_msg + +import ( + "context" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" + txmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdktx "github.com/cosmos/cosmos-sdk/types/tx" + "google.golang.org/grpc" +) + +// Module defines the interface for audit-related transactions. +type Module interface { + SubmitEvidence(ctx context.Context, subjectAddress string, evidenceType audittypes.EvidenceType, actionID string, metadataJSON string) (*sdktx.BroadcastTxResponse, error) + SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) +} + +// NewModule creates a new audit_msg module instance. 
+func NewModule( + conn *grpc.ClientConn, + authmodule auth.Module, + txmodule txmod.Module, + kr keyring.Keyring, + keyName string, + chainID string, +) (Module, error) { + return newModule(conn, authmodule, txmodule, kr, keyName, chainID) +} diff --git a/pkg/storagechallenge/deterministic/deterministic.go b/pkg/storagechallenge/deterministic/deterministic.go new file mode 100644 index 00000000..6f9afec3 --- /dev/null +++ b/pkg/storagechallenge/deterministic/deterministic.go @@ -0,0 +1,160 @@ +package deterministic + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "sort" + "strconv" + + "github.com/btcsuite/btcutil/base58" + "lukechampine.com/blake3" +) + +type xorCandidate struct { + id string + dist [32]byte +} + +func EpochID(height int64, epochZeroHeight uint64, epochLengthBlocks uint64) (uint64, bool) { + if epochLengthBlocks == 0 { + return 0, false + } + if height < int64(epochZeroHeight) { + return 0, false + } + return uint64(height-int64(epochZeroHeight)) / epochLengthBlocks, true +} + +func EpochStartHeight(epochID uint64, epochZeroHeight uint64, epochLengthBlocks uint64) int64 { + return int64(epochZeroHeight) + int64(epochID)*int64(epochLengthBlocks) +} + +func ComparisonTargetForChallengers(seed []byte, epochID uint64) string { + return "sc:challengers:" + hex.EncodeToString(seed) + ":" + strconv.FormatUint(epochID, 10) +} + +func ChallengerCount(nActive int, requested uint32) int { + if nActive <= 0 { + return 0 + } + if requested == 0 { + // auto = ceil(N/3), minimum 1 + return maxInt(1, (nActive+2)/3) + } + if int(requested) > nActive { + return nActive + } + return int(requested) +} + +func SelectTopByXORDistance(ids []string, target []byte, k int) []string { + if k <= 0 || len(ids) == 0 { + return nil + } + targetHash := ensureHashedTarget(target) + + candidates := make([]xorCandidate, 0, len(ids)) + for _, id := range ids { + idHash := blake3.Sum256([]byte(id)) + var dist [32]byte + for i := 0; i < 32; i++ { + dist[i] = 
idHash[i] ^ targetHash[i] + } + candidates = append(candidates, xorCandidate{id: id, dist: dist}) + } + + sort.Slice(candidates, func(i, j int) bool { + cmp := bytes.Compare(candidates[i].dist[:], candidates[j].dist[:]) + if cmp != 0 { + return cmp < 0 + } + return candidates[i].id < candidates[j].id + }) + + if k > len(candidates) { + k = len(candidates) + } + out := make([]string, 0, k) + for i := 0; i < k; i++ { + out = append(out, candidates[i].id) + } + return out +} + +func SelectChallengers(activeIDs []string, seed []byte, epochID uint64, requested uint32) []string { + k := ChallengerCount(len(activeIDs), requested) + target := ComparisonTargetForChallengers(seed, epochID) + return SelectTopByXORDistance(activeIDs, []byte(target), k) +} + +func SelectReplicaSet(activeIDs []string, fileKeyBase58 string, replicaCount uint32) ([]string, error) { + if replicaCount == 0 { + return nil, nil + } + target := base58.Decode(fileKeyBase58) + if len(target) == 0 { + return nil, fmt.Errorf("invalid base58 file_key") + } + k := int(replicaCount) + return SelectTopByXORDistance(activeIDs, target, k), nil +} + +func SelectFileKeys(candidateKeys []string, seed []byte, epochID uint64, challengerID string, count uint32) []string { + if count == 0 || len(candidateKeys) == 0 { + return nil + } + + keys := append([]string(nil), candidateKeys...) + sort.Strings(keys) + + want := int(count) + if want > len(keys) { + want = len(keys) + } + + out := make([]string, 0, want) + seedHex := hex.EncodeToString(seed) + + for i := 0; i < want; i++ { + msg := []byte("sc:files:" + challengerID + ":" + strconv.FormatUint(epochID, 10) + ":" + seedHex + ":" + strconv.Itoa(i)) + sum := blake3.Sum256(msg) + idx := int(binary.BigEndian.Uint64(sum[0:8]) % uint64(len(keys))) + out = append(out, keys[idx]) + + // remove selected key (stable, deterministic) + keys = append(keys[:idx], keys[idx+1:]...) 
+ if len(keys) == 0 { + break + } + } + + return out +} + +func DeterministicJitterMs(seed []byte, epochID uint64, challengerID string, maxJitterMs uint64) uint64 { + if maxJitterMs == 0 { + return 0 + } + msg := []byte("sc:jitter:" + hex.EncodeToString(seed) + ":" + strconv.FormatUint(epochID, 10) + ":" + challengerID) + sum := blake3.Sum256(msg) + v := binary.BigEndian.Uint64(sum[0:8]) + return v % maxJitterMs +} + +func ensureHashedTarget(target []byte) [32]byte { + if len(target) == 32 { + var out [32]byte + copy(out[:], target) + return out + } + return blake3.Sum256(target) +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/pkg/storagechallenge/deterministic/deterministic_test.go b/pkg/storagechallenge/deterministic/deterministic_test.go new file mode 100644 index 00000000..3558ad03 --- /dev/null +++ b/pkg/storagechallenge/deterministic/deterministic_test.go @@ -0,0 +1,46 @@ +package deterministic + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSelectChallengers_Deterministic(t *testing.T) { + seed, err := hex.DecodeString("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f") + require.NoError(t, err) + + active := []string{ + "lumera1aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "lumera1bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "lumera1cccccccccccccccccccccccccccccccccccccc", + "lumera1dddddddddddddddddddddddddddddddddddddd", + "lumera1eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", + "lumera1ffffffffffffffffffffffffffffffffffffff", + } + + c1 := SelectChallengers(active, seed, 7, 0) + c2 := SelectChallengers(active, seed, 7, 0) + require.Equal(t, c1, c2) + + // auto=ceil(6/3)=2 + require.Len(t, c1, 2) + require.NotEqual(t, c1[0], c1[1]) +} + +func TestSelectFileKeys_NoDuplicates(t *testing.T) { + seed, err := hex.DecodeString("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + require.NoError(t, err) + + candidates := []string{"k3", "k1", "k2", 
"k4"} + out := SelectFileKeys(candidates, seed, 0, "lumera1id", 3) + require.Len(t, out, 3) + + seen := map[string]struct{}{} + for _, k := range out { + _, ok := seen[k] + require.False(t, ok) + seen[k] = struct{}{} + } +} diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index 27a094cb..c9d287d3 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -4,10 +4,13 @@ import ( "context" "github.com/LumeraProtocol/lumera/x/action/v1/types" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" supernodeTypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" bankmod "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" @@ -29,6 +32,8 @@ type MockLumeraClient struct { authMod *MockAuthModule actionMod *MockActionModule actionMsgMod *MockActionMsgModule + auditMod *MockAuditModule + auditMsgMod *MockAuditMsgModule bankMod *MockBankModule supernodeMod *MockSupernodeModule supernodeMsgMod supernode_msg.Module @@ -42,6 +47,8 @@ type MockLumeraClient struct { func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, error) { actionMod := &MockActionModule{} actionMsgMod := &MockActionMsgModule{} + auditMod := &MockAuditModule{} + auditMsgMod := &MockAuditMsgModule{} bankMod := &MockBankModule{} supernodeMod := &MockSupernodeModule{addresses: addresses} supernodeMsgMod := &MockSupernodeMsgModule{} @@ -52,6 +59,8 @@ func NewMockLumeraClient(kr keyring.Keyring, addresses []string) (lumera.Client, authMod: &MockAuthModule{}, actionMod: actionMod, 
actionMsgMod: actionMsgMod, + auditMod: auditMod, + auditMsgMod: auditMsgMod, bankMod: bankMod, supernodeMod: supernodeMod, supernodeMsgMod: supernodeMsgMod, @@ -77,6 +86,14 @@ func (c *MockLumeraClient) ActionMsg() action_msg.Module { return c.actionMsgMod } +func (c *MockLumeraClient) Audit() audit.Module { + return c.auditMod +} + +func (c *MockLumeraClient) AuditMsg() audit_msg.Module { + return c.auditMsgMod +} + // Bank returns the Bank module client func (c *MockLumeraClient) Bank() bankmod.Module { return c.bankMod @@ -174,6 +191,42 @@ func (m *MockActionMsgModule) SimulateFinalizeCascadeAction(ctx context.Context, return &sdktx.SimulateResponse{}, nil } +type MockAuditModule struct{} + +func (m *MockAuditModule) GetParams(ctx context.Context) (*audittypes.QueryParamsResponse, error) { + return &audittypes.QueryParamsResponse{}, nil +} + +func (m *MockAuditModule) GetEpochAnchor(ctx context.Context, epochID uint64) (*audittypes.QueryEpochAnchorResponse, error) { + return &audittypes.QueryEpochAnchorResponse{}, nil +} + +func (m *MockAuditModule) GetCurrentEpochAnchor(ctx context.Context) (*audittypes.QueryCurrentEpochAnchorResponse, error) { + return &audittypes.QueryCurrentEpochAnchorResponse{}, nil +} + +func (m *MockAuditModule) GetCurrentEpoch(ctx context.Context) (*audittypes.QueryCurrentEpochResponse, error) { + return &audittypes.QueryCurrentEpochResponse{}, nil +} + +func (m *MockAuditModule) GetAssignedTargets(ctx context.Context, supernodeAccount string, epochID uint64) (*audittypes.QueryAssignedTargetsResponse, error) { + return &audittypes.QueryAssignedTargetsResponse{}, nil +} + +func (m *MockAuditModule) GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*audittypes.QueryAuditReportResponse, error) { + return &audittypes.QueryAuditReportResponse{}, nil +} + +type MockAuditMsgModule struct{} + +func (m *MockAuditMsgModule) SubmitEvidence(ctx context.Context, subjectAddress string, evidenceType audittypes.EvidenceType, 
actionID string, metadataJSON string) (*sdktx.BroadcastTxResponse, error) { + return &sdktx.BroadcastTxResponse{}, nil +} + +func (m *MockAuditMsgModule) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) { + return &sdktx.BroadcastTxResponse{}, nil +} + // MockSupernodeModule implements the supernode.Module interface for testing type MockSupernodeModule struct { addresses []string diff --git a/proto/supernode/storage_challenge.proto b/proto/supernode/storage_challenge.proto new file mode 100644 index 00000000..6494787c --- /dev/null +++ b/proto/supernode/storage_challenge.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package supernode; +option go_package = "github.com/LumeraProtocol/supernode/v2/gen/supernode"; + +// StorageChallengeService exposes minimal RPCs required to run epoch-anchored storage challenges off-chain. +service StorageChallengeService { + rpc GetSliceProof(GetSliceProofRequest) returns (GetSliceProofResponse) {} + rpc VerifySliceProof(VerifySliceProofRequest) returns (VerifySliceProofResponse) {} +} + +message GetSliceProofRequest { + string challenge_id = 1; + uint64 epoch_id = 2; + bytes seed = 3; + + string file_key = 4; + uint64 requested_start = 5; + uint64 requested_end = 6; + + string challenger_id = 7; + string recipient_id = 8; + repeated string observer_ids = 9; +} + +message GetSliceProofResponse { + string challenge_id = 1; + uint64 epoch_id = 2; + + string file_key = 3; + uint64 start = 4; + uint64 end = 5; + + string recipient_id = 6; + bytes slice = 7; + string proof_hash_hex = 8; + + bool ok = 9; + string error = 10; +} + +message VerifySliceProofRequest { + string challenge_id = 1; + uint64 epoch_id = 2; + string file_key = 3; + uint64 start = 4; + uint64 end = 5; + bytes slice = 6; + string proof_hash_hex = 7; + string challenger_id = 8; + string recipient_id = 9; +} + +message VerifySliceProofResponse { + string challenge_id = 1; + 
uint64 epoch_id = 2; + string observer_id = 3; + bool ok = 4; + string error = 5; +} + diff --git a/supernode/audit_reporter/service.go b/supernode/audit_reporter/service.go new file mode 100644 index 00000000..b00f9395 --- /dev/null +++ b/supernode/audit_reporter/service.go @@ -0,0 +1,244 @@ +package audit_reporter + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "time" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/reachability" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + defaultPollInterval = 5 * time.Second + defaultDialTimeout = 2 * time.Second + + maxConcurrentTargets = 8 +) + +// Service submits one MsgSubmitAuditReport per epoch for the local supernode. +// All runtime behavior is driven by on-chain params/queries; there are no local config knobs. +type Service struct { + identity string + + lumera lumera.Client + keyring keyring.Keyring + keyName string + + pollInterval time.Duration + dialTimeout time.Duration +} + +func NewService(identity string, lumeraClient lumera.Client, kr keyring.Keyring, keyName string) (*Service, error) { + identity = strings.TrimSpace(identity) + if identity == "" { + return nil, fmt.Errorf("identity is empty") + } + if lumeraClient == nil || lumeraClient.Audit() == nil || lumeraClient.AuditMsg() == nil || lumeraClient.SuperNode() == nil || lumeraClient.Node() == nil { + return nil, fmt.Errorf("lumera client is missing required modules") + } + if kr == nil { + return nil, fmt.Errorf("keyring is nil") + } + keyName = strings.TrimSpace(keyName) + if keyName == "" { + return nil, fmt.Errorf("key name is empty") + } + + // Defensive: ensure the configured identity matches the local signing key address. 
+ key, err := kr.Key(keyName) + if err != nil { + return nil, fmt.Errorf("keyring key not found: %w", err) + } + addr, err := key.GetAddress() + if err != nil { + return nil, fmt.Errorf("get key address: %w", err) + } + if got := addr.String(); got != identity { + return nil, fmt.Errorf("identity mismatch: config.identity=%s key(%s)=%s", identity, keyName, got) + } + + return &Service{ + identity: identity, + lumera: lumeraClient, + keyring: kr, + keyName: keyName, + pollInterval: defaultPollInterval, + dialTimeout: defaultDialTimeout, + }, nil +} + +func (s *Service) Run(ctx context.Context) error { + ticker := time.NewTicker(s.pollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + s.tick(ctx) + } + } +} + +func (s *Service) tick(ctx context.Context) { + epochResp, err := s.lumera.Audit().GetCurrentEpoch(ctx) + if err != nil || epochResp == nil { + return + } + epochID := epochResp.EpochId + reachability.SetCurrentEpochID(epochID) + + anchorResp, err := s.lumera.Audit().GetEpochAnchor(ctx, epochID) + if err != nil || anchorResp == nil || anchorResp.Anchor.EpochId != epochID { + // Anchor may not be committed yet at the epoch boundary; retry on next tick. + return + } + + // Idempotency: if a report exists for this epoch, do nothing. 
+ if _, err := s.lumera.Audit().GetAuditReport(ctx, epochID, s.identity); err == nil { + return + } else if status.Code(err) != codes.NotFound { + return + } + + assignResp, err := s.lumera.Audit().GetAssignedTargets(ctx, s.identity, epochID) + if err != nil || assignResp == nil { + return + } + + peerObservations := s.buildPeerObservations(ctx, epochID, assignResp.RequiredOpenPorts, assignResp.TargetSupernodeAccounts) + + if _, err := s.lumera.AuditMsg().SubmitAuditReport(ctx, epochID, peerObservations); err != nil { + logtrace.Warn(ctx, "audit report submit failed", logtrace.Fields{ + "epoch_id": epochID, + "error": err.Error(), + }) + return + } + + logtrace.Info(ctx, "audit report submitted", logtrace.Fields{ + "epoch_id": epochID, + "peer_observations_count": len(peerObservations), + }) +} + +func (s *Service) buildPeerObservations(ctx context.Context, epochID uint64, requiredOpenPorts []uint32, targets []string) []*audittypes.AuditPeerObservation { + if len(targets) == 0 { + return nil + } + + out := make([]*audittypes.AuditPeerObservation, len(targets)) + + type workItem struct { + index int + target string + } + + work := make(chan workItem) + done := make(chan struct{}) + + worker := func() { + defer func() { done <- struct{}{} }() + for item := range work { + out[item.index] = s.observeTarget(ctx, epochID, requiredOpenPorts, item.target) + } + } + + workers := maxConcurrentTargets + if workers > len(targets) { + workers = len(targets) + } + for i := 0; i < workers; i++ { + go worker() + } + + for i, t := range targets { + work <- workItem{index: i, target: t} + } + close(work) + + for i := 0; i < workers; i++ { + <-done + } + + // ensure no nil elements (MsgSubmitAuditReport rejects nil observations) + final := make([]*audittypes.AuditPeerObservation, 0, len(out)) + for i := range out { + if out[i] != nil { + final = append(final, out[i]) + } + } + return final +} + +func (s *Service) observeTarget(ctx context.Context, epochID uint64, requiredOpenPorts 
[]uint32, target string) *audittypes.AuditPeerObservation { + target = strings.TrimSpace(target) + if target == "" { + return nil + } + + host, err := s.targetHost(ctx, target) + if err != nil { + logtrace.Warn(ctx, "audit observe target: resolve host failed", logtrace.Fields{ + "epoch_id": epochID, + "target": target, + "error": err.Error(), + }) + host = "" + } + + portStates := make([]audittypes.PortState, 0, len(requiredOpenPorts)) + for _, p := range requiredOpenPorts { + portStates = append(portStates, probeTCP(ctx, host, p, s.dialTimeout)) + } + + return &audittypes.AuditPeerObservation{ + TargetSupernodeAccount: target, + PortStates: portStates, + } +} + +func (s *Service) targetHost(ctx context.Context, supernodeAccount string) (string, error) { + info, err := s.lumera.SuperNode().GetSupernodeWithLatestAddress(ctx, supernodeAccount) + if err != nil || info == nil { + return "", fmt.Errorf("resolve supernode address: %w", err) + } + raw := strings.TrimSpace(info.LatestAddress) + if raw == "" { + return "", fmt.Errorf("empty latest address for %s", supernodeAccount) + } + + // LatestAddress is expected to be an IP/host, but tolerate host:port. 
+ if host, _, splitErr := net.SplitHostPort(raw); splitErr == nil && host != "" { + return host, nil + } + return raw, nil +} + +func probeTCP(ctx context.Context, host string, port uint32, timeout time.Duration) audittypes.PortState { + host = strings.TrimSpace(host) + if host == "" { + return audittypes.PortState_PORT_STATE_UNKNOWN + } + if port == 0 || port > 65535 { + return audittypes.PortState_PORT_STATE_UNKNOWN + } + + d := net.Dialer{Timeout: timeout} + conn, err := d.DialContext(ctx, "tcp", net.JoinHostPort(host, strconv.FormatUint(uint64(port), 10))) + if err != nil { + return audittypes.PortState_PORT_STATE_CLOSED + } + _ = conn.Close() + return audittypes.PortState_PORT_STATE_OPEN +} diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 6a900a06..09f0d0f3 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -18,15 +18,21 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/lumera" grpcserver "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/server" "github.com/LumeraProtocol/supernode/v2/pkg/reachability" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/task" + auditReporterService "github.com/LumeraProtocol/supernode/v2/supernode/audit_reporter" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "github.com/LumeraProtocol/supernode/v2/supernode/config" statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" - supernodeMetrics "github.com/LumeraProtocol/supernode/v2/supernode/supernode_metrics" + storageChallengeService "github.com/LumeraProtocol/supernode/v2/supernode/storage_challenge" + // Legacy supernode metrics reporter (MsgReportSupernodeMetrics) has been superseded by + // epoch-scoped audit reporting in `x/audit`. 
+ // supernodeMetrics "github.com/LumeraProtocol/supernode/v2/supernode/supernode_metrics" "github.com/LumeraProtocol/supernode/v2/supernode/transport/gateway" cascadeRPC "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/cascade" server "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/status" + storageChallengeRPC "github.com/LumeraProtocol/supernode/v2/supernode/transport/grpc/storage_challenge" "github.com/LumeraProtocol/supernode/v2/supernode/verifier" cKeyring "github.com/cosmos/cosmos-sdk/crypto/keyring" @@ -81,8 +87,9 @@ The supernode will connect to the Lumera network and begin participating in the // Reachability evidence store (used for open_ports inference). reachability.SetDefaultStore(reachability.NewStore()) - // Epoch tracker (Option A): mark per-service inbound evidence per chain epoch. - // The current epoch ID is set periodically by the metrics collector. + // Epoch tracker: mark per-service inbound evidence per chain epoch (best-effort). + // If no component sets the current epoch ID, reachability evidence is still recorded + // but is not bucketed by epoch. 
reachability.SetDefaultEpochTracker(reachability.NewEpochTracker(8)) // W+2 with W=6 default // Verify config matches chain registration before starting services @@ -155,17 +162,56 @@ The supernode will connect to the Lumera network and begin participating in the // Create supernode status service with injected tracker statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig, tr) - metricsCollector := supernodeMetrics.NewCollector( - statusSvc, - lumeraClient, + auditReporter, err := auditReporterService.NewService( appConfig.SupernodeConfig.Identity, - Version, + lumeraClient, kr, - appConfig.SupernodeConfig.Port, - appConfig.P2PConfig.Port, - appConfig.SupernodeConfig.GatewayPort, + appConfig.SupernodeConfig.KeyName, ) - logtrace.Info(ctx, "Metrics collection enabled", logtrace.Fields{}) + if err != nil { + logtrace.Fatal(ctx, "Failed to initialize audit reporter", logtrace.Fields{"error": err.Error()}) + } + + // Legacy on-chain supernode metrics reporting has been superseded by `x/audit`. + // metricsCollector := supernodeMetrics.NewCollector( + // statusSvc, + // lumeraClient, + // appConfig.SupernodeConfig.Identity, + // Version, + // kr, + // appConfig.SupernodeConfig.Port, + // appConfig.P2PConfig.Port, + // appConfig.SupernodeConfig.GatewayPort, + // ) + // logtrace.Info(ctx, "Metrics collection enabled", logtrace.Fields{}) + + // Storage challenge history DB (shared by the gRPC handler and runner). 
+ historyStore, err := queries.OpenHistoryDB() + if err != nil { + logtrace.Fatal(ctx, "Failed to open history DB", logtrace.Fields{"error": err.Error()}) + } + + storageChallengeServer := storageChallengeRPC.NewServer(appConfig.SupernodeConfig.Identity, p2pService, historyStore) + var storageChallengeRunner *storageChallengeService.Service + if appConfig.StorageChallengeConfig.Enabled { + storageChallengeRunner, err = storageChallengeService.NewService( + appConfig.SupernodeConfig.Identity, + appConfig.SupernodeConfig.Port, + lumeraClient, + p2pService, + kr, + historyStore, + storageChallengeService.Config{ + Enabled: true, + PollInterval: time.Duration(appConfig.StorageChallengeConfig.PollIntervalMs) * time.Millisecond, + SubmitEvidence: appConfig.StorageChallengeConfig.SubmitEvidence, + KeyName: appConfig.SupernodeConfig.KeyName, + }, + ) + if err != nil { + logtrace.Fatal(ctx, "Failed to initialize storage challenge runner", logtrace.Fields{"error": err.Error()}) + } + } // Create supernode server supernodeServer := server.NewSupernodeServer(statusSvc) @@ -180,6 +226,7 @@ The supernode will connect to the Lumera network and begin participating in the lumeraClient, grpcserver.ServiceDesc{Desc: &pbcascade.CascadeService_ServiceDesc, Service: cascadeActionServer}, grpcserver.ServiceDesc{Desc: &pbsupernode.SupernodeService_ServiceDesc, Service: supernodeServer}, + grpcserver.ServiceDesc{Desc: &pbsupernode.StorageChallengeService_ServiceDesc, Service: storageChallengeServer}, ) if err != nil { logtrace.Fatal(ctx, "Failed to create gRPC server", logtrace.Fields{"error": err.Error()}) @@ -200,7 +247,10 @@ The supernode will connect to the Lumera network and begin participating in the // Start the services using the standard runner and capture exit servicesErr := make(chan error, 1) go func() { - services := []service{grpcServer, cService, p2pService, gatewayServer, metricsCollector} + services := []service{grpcServer, cService, p2pService, gatewayServer, 
auditReporter} + if storageChallengeRunner != nil { + services = append(services, storageChallengeRunner) + } servicesErr <- RunServices(ctx, services...) }() @@ -237,6 +287,7 @@ The supernode will connect to the Lumera network and begin participating in the } }() grpcServer.Close() + historyStore.CloseHistoryDB(context.Background()) // Close Lumera client without blocking shutdown logtrace.Debug(ctx, "Closing Lumera client", logtrace.Fields{}) diff --git a/supernode/config.yml b/supernode/config.yml index 35d888a3..350650e2 100644 --- a/supernode/config.yml +++ b/supernode/config.yml @@ -26,3 +26,9 @@ lumera: # RaptorQ Configuration raptorq: files_dir: "raptorq_files" + +# Storage Challenge Configuration +storage_challenge: + enabled: true + poll_interval_ms: 5000 + submit_evidence: true diff --git a/supernode/config/config.go b/supernode/config/config.go index d655391c..92d0de7c 100644 --- a/supernode/config/config.go +++ b/supernode/config/config.go @@ -47,12 +47,19 @@ type LogConfig struct { Level string `yaml:"level"` } +type StorageChallengeConfig struct { + Enabled bool `yaml:"enabled"` + PollIntervalMs uint64 `yaml:"poll_interval_ms,omitempty"` + SubmitEvidence bool `yaml:"submit_evidence,omitempty"` +} + type Config struct { - SupernodeConfig `yaml:"supernode"` - KeyringConfig `yaml:"keyring"` - P2PConfig `yaml:"p2p"` - LumeraClientConfig `yaml:"lumera"` - RaptorQConfig `yaml:"raptorq"` + SupernodeConfig `yaml:"supernode"` + KeyringConfig `yaml:"keyring"` + P2PConfig `yaml:"p2p"` + LumeraClientConfig `yaml:"lumera"` + RaptorQConfig `yaml:"raptorq"` + StorageChallengeConfig `yaml:"storage_challenge"` // Store base directory (not from YAML) BaseDir string `yaml:"-"` @@ -142,6 +149,11 @@ func LoadConfig(filename string, baseDir string) (*Config, error) { // Set the base directory config.BaseDir = baseDir + // Apply storage challenge defaults. 
+ if config.StorageChallengeConfig.PollIntervalMs == 0 { + config.StorageChallengeConfig.PollIntervalMs = DefaultStorageChallengePollIntervalMs + } + // Create directories if err := config.EnsureDirs(); err != nil { return nil, err diff --git a/supernode/config/defaults.go b/supernode/config/defaults.go index d7915259..e2cbedc7 100644 --- a/supernode/config/defaults.go +++ b/supernode/config/defaults.go @@ -3,13 +3,14 @@ package config // Centralized default values for configuration const ( - DefaultKeyringBackend = "test" - DefaultKeyringDir = "keys" - DefaultKeyName = "test-key" - DefaultSupernodeHost = "0.0.0.0" - DefaultSupernodePort = 4444 - DefaultP2PPort = 4445 - DefaultLumeraGRPC = "localhost:9090" - DefaultChainID = "testing" - DefaultRaptorQFilesDir = "raptorq_files" + DefaultKeyringBackend = "test" + DefaultKeyringDir = "keys" + DefaultKeyName = "test-key" + DefaultSupernodeHost = "0.0.0.0" + DefaultSupernodePort = 4444 + DefaultP2PPort = 4445 + DefaultLumeraGRPC = "localhost:9090" + DefaultChainID = "testing" + DefaultRaptorQFilesDir = "raptorq_files" + DefaultStorageChallengePollIntervalMs = 5000 ) diff --git a/supernode/config/save.go b/supernode/config/save.go index d93e6cb8..dfa88b7f 100644 --- a/supernode/config/save.go +++ b/supernode/config/save.go @@ -57,5 +57,10 @@ func CreateDefaultConfig(keyName, identity, chainID string, keyringBackend, keyr P2PConfig: P2PConfig{Port: DefaultP2PPort, DataDir: "data/p2p"}, LumeraClientConfig: LumeraClientConfig{GRPCAddr: DefaultLumeraGRPC, ChainID: chainID}, RaptorQConfig: RaptorQConfig{FilesDir: DefaultRaptorQFilesDir}, + StorageChallengeConfig: StorageChallengeConfig{ + Enabled: true, + PollIntervalMs: DefaultStorageChallengePollIntervalMs, + SubmitEvidence: true, + }, } } diff --git a/supernode/storage_challenge/README.md b/supernode/storage_challenge/README.md new file mode 100644 index 00000000..99d74a65 --- /dev/null +++ b/supernode/storage_challenge/README.md @@ -0,0 +1,51 @@ +# Storage Challenge 
(Supernode) + +This package implements the Supernode side of the Storage Challenge protocol. The chain side lives in `lumera/x/audit/v1` +and provides deterministic epoch cadence (`epoch_id`) and a per-epoch `EpochAnchor` record (seed + frozen eligible sets). + +## High-level flow + +1. A background service runs on each Supernode (see `Service.Run`). +2. On each tick: + - query latest chain height, + - query `x/audit` params (and validate them), + - derive `epoch_id` deterministically from `(height, epoch_zero_height, epoch_length_blocks)`, + - fetch `EpochAnchor(epoch_id)` (retrying when the anchor is not committed yet at an epoch boundary), + - deterministically decide if this node is a challenger for the epoch, + - if challenger, select a bounded set of local file keys and run challenges. + +## Deterministic selection + +Selection must match the chain rules. The service uses: +- `EpochAnchor.Seed` and `epoch_id` for deterministic selection/jitter, +- `EpochAnchor.ActiveSupernodeAccounts` for challenger selection, +- sorted local file keys to ensure stable file-key selection. + +## Challenge execution + +For each selected `file_key`: +1. Select the replica set deterministically from the active set. +2. Pick a recipient and observers (excluding self). +3. Request a proof slice from the recipient via gRPC (`GetSliceProof`). +4. Validate the proof hash locally. +5. Ask observers to verify the proof (`VerifySliceProof`) and enforce quorum (if configured). + +## Evidence submission + +On failure conditions (recipient error/unreachable, invalid proof, observer quorum failure), and only when enabled: +- Build `StorageChallengeFailureEvidenceMetadata` JSON. +- Enforce `sc_evidence_max_bytes` locally before submission. +- Submit `MsgSubmitEvidence` to `x/audit` using the Supernode’s Cosmos key. + +Evidence is chain-verifiable at the policy/assignment level (epoch anchor + deterministic rules), while full transcripts remain +off-chain (supernode logs and local stores). 
+ +## Notes / operational behavior + +- Idempotency: the service tracks `lastRunEpoch` and runs at most once per epoch (per process) once it determines whether + it is a challenger for that epoch. +- Anchor boundary: it is expected that `EpochAnchor(epoch_id)` may not be queryable immediately at an epoch boundary; the + service retries on the next tick. +- Candidate key lookback: lookback is computed as `lookback_epochs * estimated_epoch_duration`, where epoch duration is + estimated from recent blocks. If estimation fails, it falls back to `24h * lookback_epochs`. + diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go new file mode 100644 index 00000000..2dbdb145 --- /dev/null +++ b/supernode/storage_challenge/service.go @@ -0,0 +1,572 @@ +package storage_challenge + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "sort" + "strconv" + "strings" + "time" + + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" + "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" + "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera" + "github.com/LumeraProtocol/supernode/v2/pkg/net/credentials" + grpcclient "github.com/LumeraProtocol/supernode/v2/pkg/net/grpc/client" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/v2/pkg/storagechallenge/deterministic" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "lukechampine.com/blake3" +) + +type Service struct { + cfg Config + identity string + grpcPort uint16 + + lumera lumera.Client + p2p p2p.Client + kr keyring.Keyring + store queries.LocalStoreInterface + + grpcClient *grpcclient.Client + grpcOpts *grpcclient.ClientOptions +} + +type Config struct { + Enabled bool + PollInterval time.Duration + SubmitEvidence bool + KeyName string +} + +func 
NewService(identity string, grpcPort uint16, lumeraClient lumera.Client, p2pClient p2p.Client, kr keyring.Keyring, store queries.LocalStoreInterface, cfg Config) (*Service, error) { + identity = strings.TrimSpace(identity) + if identity == "" { + return nil, fmt.Errorf("identity is empty") + } + if lumeraClient == nil || lumeraClient.Audit() == nil || lumeraClient.AuditMsg() == nil || lumeraClient.Node() == nil || lumeraClient.SuperNode() == nil { + return nil, fmt.Errorf("lumera client is missing required modules") + } + if p2pClient == nil { + return nil, fmt.Errorf("p2p client is nil") + } + if kr == nil { + return nil, fmt.Errorf("keyring is nil") + } + if strings.TrimSpace(cfg.KeyName) == "" { + return nil, fmt.Errorf("key name is empty") + } + if cfg.PollInterval <= 0 { + cfg.PollInterval = 5 * time.Second + } + + // Ensure keyring key address matches the configured identity (supernode_account). + key, err := kr.Key(cfg.KeyName) + if err != nil { + return nil, fmt.Errorf("keyring key not found: %w", err) + } + addr, err := key.GetAddress() + if err != nil { + return nil, fmt.Errorf("get key address: %w", err) + } + if got := addr.String(); got != identity { + return nil, fmt.Errorf("identity mismatch: config.identity=%s key(%s)=%s", identity, cfg.KeyName, got) + } + + return &Service{ + cfg: cfg, + identity: identity, + grpcPort: grpcPort, + lumera: lumeraClient, + p2p: p2pClient, + kr: kr, + store: store, + }, nil +} + +func (s *Service) Run(ctx context.Context) error { + if !s.cfg.Enabled { + <-ctx.Done() + return nil + } + + if err := s.initClients(ctx); err != nil { + return err + } + + ticker := time.NewTicker(s.cfg.PollInterval) + defer ticker.Stop() + + var lastRunEpoch uint64 + var lastRunOK bool + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + height, ok := s.latestHeight(ctx) + if !ok { + continue + } + + params, ok := s.auditParams(ctx) + if !ok { + continue + } + + epochID, ok := deterministic.EpochID(height, 
params.EpochZeroHeight, params.EpochLengthBlocks) + if !ok { + continue + } + if !params.ScEnabled { + lastRunEpoch = epochID + lastRunOK = true + continue + } + if lastRunOK && lastRunEpoch == epochID { + continue + } + + anchorResp, err := s.lumera.Audit().GetEpochAnchor(ctx, epochID) + if err != nil || anchorResp == nil || anchorResp.Anchor.EpochId != epochID { + // Anchor may not be committed yet at epoch boundary; retry on next tick. + continue + } + anchor := anchorResp.Anchor + + challengers := deterministic.SelectChallengers(anchor.ActiveSupernodeAccounts, anchor.Seed, epochID, params.ScChallengersPerEpoch) + if !containsString(challengers, s.identity) { + lastRunEpoch = epochID + lastRunOK = true + continue + } + + jitterMs := deterministic.DeterministicJitterMs(anchor.Seed, epochID, s.identity, params.ScStartJitterMs) + if jitterMs > 0 { + timer := time.NewTimer(time.Duration(jitterMs) * time.Millisecond) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + } + + if err := s.runEpoch(ctx, anchor, params); err != nil { + logtrace.Warn(ctx, "storage challenge epoch run error", logtrace.Fields{ + "epoch_id": epochID, + "error": err.Error(), + }) + lastRunEpoch = epochID + lastRunOK = false + continue + } + + lastRunEpoch = epochID + lastRunOK = true + } + } +} + +func (s *Service) initClients(ctx context.Context) error { + validator := lumera.NewSecureKeyExchangeValidator(s.lumera) + + grpcCreds, err := credentials.NewClientCreds(&credentials.ClientOptions{ + CommonOptions: credentials.CommonOptions{ + Keyring: s.kr, + LocalIdentity: s.identity, + PeerType: securekeyx.Supernode, + Validator: validator, + }, + }) + if err != nil { + return fmt.Errorf("create gRPC client creds: %w", err) + } + + s.grpcClient = grpcclient.NewClient(grpcCreds) + s.grpcOpts = grpcclient.DefaultClientOptions() + s.grpcOpts.EnableRetries = true + return nil +} + +func (s *Service) latestHeight(ctx context.Context) (int64, bool) { + resp, err := 
s.lumera.Node().GetLatestBlock(ctx) + if err != nil || resp == nil || resp.Block == nil { + return 0, false + } + return resp.Block.Header.Height, true +} + +func (s *Service) auditParams(ctx context.Context) (audittypes.Params, bool) { + resp, err := s.lumera.Audit().GetParams(ctx) + if err != nil || resp == nil { + return audittypes.Params{}, false + } + p := resp.Params.WithDefaults() + if err := p.Validate(); err != nil { + return audittypes.Params{}, false + } + return p, true +} + +func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params) error { + epochID := anchor.EpochId + + lookback := s.candidateKeysLookbackDuration(ctx, params) + to := time.Now().UTC() + from := to.Add(-lookback) + + keys, err := s.p2p.GetLocalKeys(ctx, &from, to) + if err != nil { + return fmt.Errorf("get local keys: %w", err) + } + if len(keys) == 0 { + logtrace.Debug(ctx, "storage challenge: no local keys to challenge", logtrace.Fields{"epoch_id": epochID}) + return nil + } + sort.Strings(keys) + + fileKeys := deterministic.SelectFileKeys(keys, anchor.Seed, epochID, s.identity, params.ScFilesPerChallenger) + if len(fileKeys) == 0 { + return nil + } + + for _, fileKey := range fileKeys { + if err := s.runChallengeForFile(ctx, anchor, params, fileKey); err != nil { + logtrace.Warn(ctx, "storage challenge file run error", logtrace.Fields{ + "epoch_id": epochID, + "file_key": fileKey, + "error": err.Error(), + }) + } + } + + return nil +} + +func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, fileKey string) error { + epochID := anchor.EpochId + + replicas, err := deterministic.SelectReplicaSet(anchor.ActiveSupernodeAccounts, fileKey, params.ScReplicaCount) + if err != nil { + return err + } + + recipient, observers := pickRecipientAndObservers(replicas, s.identity, int(params.ScObserverThreshold)) + if recipient == "" { + return nil + } + + recipientAddr, err := 
s.supernodeGRPCAddr(ctx, recipient) + if err != nil { + return err + } + type observerPeer struct { + id string + addr string + } + observerPeers := make([]observerPeer, 0, len(observers)) + for _, ob := range observers { + addr, err := s.supernodeGRPCAddr(ctx, ob) + if err != nil { + continue + } + observerPeers = append(observerPeers, observerPeer{id: ob, addr: addr}) + } + + requestedStart := uint64(0) + requestedEnd := params.ScMinSliceBytes + if requestedEnd == 0 { + requestedEnd = 1024 + } + + challengeID := deriveChallengeID(anchor.Seed, epochID, fileKey, s.identity, recipient) + + req := &supernode.GetSliceProofRequest{ + ChallengeId: challengeID, + EpochId: epochID, + Seed: anchor.Seed, + FileKey: fileKey, + RequestedStart: requestedStart, + RequestedEnd: requestedEnd, + ChallengerId: s.identity, + RecipientId: recipient, + ObserverIds: append([]string(nil), observers...), + } + + resp, err := s.callGetSliceProof(ctx, recipientAddr, req, time.Duration(params.ScResponseTimeoutMs)*time.Millisecond) + if err != nil || resp == nil || !resp.Ok { + failure := "RECIPIENT_ERROR" + if err != nil { + failure = "RECIPIENT_UNREACHABLE" + } + return s.maybeSubmitEvidence(ctx, params, epochID, challengeID, fileKey, recipient, failure, transcriptHash(challengeID, req, resp, nil, err)) + } + + sum := blake3.Sum256(resp.Slice) + if got := hex.EncodeToString(sum[:]); got != strings.ToLower(resp.ProofHashHex) { + return s.maybeSubmitEvidence(ctx, params, epochID, challengeID, fileKey, recipient, "INVALID_PROOF", transcriptHash(challengeID, req, resp, nil, fmt.Errorf("proof mismatch"))) + } + + okCount := 0 + required := int(params.ScObserverThreshold) + if required <= 0 { + required = 0 + } + + observerResults := make([]*supernode.VerifySliceProofResponse, 0, len(observerPeers)) + if required > 0 && len(observerPeers) > 0 { + verifyReq := &supernode.VerifySliceProofRequest{ + ChallengeId: challengeID, + EpochId: epochID, + FileKey: fileKey, + Start: resp.Start, + End: 
resp.End, + Slice: resp.Slice, + ProofHashHex: resp.ProofHashHex, + ChallengerId: s.identity, + RecipientId: recipient, + } + + timeout := time.Duration(params.ScAffirmationTimeoutMs) * time.Millisecond + if timeout <= 0 { + timeout = 30 * time.Second + } + + for _, peer := range observerPeers { + vr, verr := s.callVerifySliceProof(ctx, peer.addr, verifyReq, timeout) + if verr == nil && vr != nil && vr.Ok { + okCount++ + } + observerResults = append(observerResults, vr) + } + } + + if required > 0 && okCount < required { + return s.maybeSubmitEvidence(ctx, params, epochID, challengeID, fileKey, recipient, "OBSERVER_QUORUM_FAILED", transcriptHash(challengeID, req, resp, observerResults, fmt.Errorf("observer quorum failed"))) + } + + logtrace.Info(ctx, "storage challenge ok", logtrace.Fields{ + "epoch_id": epochID, + "challenge_id": challengeID, + "file_key": fileKey, + "recipient_id": recipient, + "observers_ok": okCount, + "observers_req": required, + }) + + return nil +} + +func (s *Service) supernodeGRPCAddr(ctx context.Context, supernodeAccount string) (string, error) { + info, err := s.lumera.SuperNode().GetSupernodeWithLatestAddress(ctx, supernodeAccount) + if err != nil || info == nil { + return "", fmt.Errorf("resolve supernode address: %w", err) + } + host := strings.TrimSpace(info.LatestAddress) + if host == "" { + return "", fmt.Errorf("no ip address for supernode %s", supernodeAccount) + } + return net.JoinHostPort(host, fmt.Sprintf("%d", s.grpcPort)), nil +} + +func (s *Service) callGetSliceProof(ctx context.Context, address string, req *supernode.GetSliceProofRequest, timeout time.Duration) (*supernode.GetSliceProofResponse, error) { + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + conn, err := s.grpcClient.Connect(cctx, address, s.grpcOpts) + if err != nil { + return nil, err + } + defer conn.Close() + + client := supernode.NewStorageChallengeServiceClient(conn) + return client.GetSliceProof(cctx, req) +} + +func (s *Service) 
callVerifySliceProof(ctx context.Context, address string, req *supernode.VerifySliceProofRequest, timeout time.Duration) (*supernode.VerifySliceProofResponse, error) { + cctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + conn, err := s.grpcClient.Connect(cctx, address, s.grpcOpts) + if err != nil { + return nil, err + } + defer conn.Close() + + client := supernode.NewStorageChallengeServiceClient(conn) + return client.VerifySliceProof(cctx, req) +} + +func (s *Service) maybeSubmitEvidence(ctx context.Context, params audittypes.Params, epochID uint64, challengeID, fileKey, recipient, failureType, transcriptHashHex string) error { + if !s.cfg.SubmitEvidence || !params.ScEnabled { + return nil + } + + meta := audittypes.StorageChallengeFailureEvidenceMetadata{ + EpochId: epochID, + ChallengerSupernodeAccount: s.identity, + ChallengedSupernodeAccount: recipient, + ChallengeId: challengeID, + FileKey: fileKey, + FailureType: failureType, + TranscriptHash: transcriptHashHex, + } + bz, err := json.Marshal(meta) + if err != nil { + return err + } + if params.ScEvidenceMaxBytes > 0 && uint64(len(bz)) > params.ScEvidenceMaxBytes { + return fmt.Errorf("evidence metadata too large: %d > %d", len(bz), params.ScEvidenceMaxBytes) + } + + _, err = s.lumera.AuditMsg().SubmitEvidence(ctx, recipient, audittypes.EvidenceType_EVIDENCE_TYPE_STORAGE_CHALLENGE_FAILURE, "", string(bz)) + if err != nil { + return err + } + logtrace.Warn(ctx, "storage challenge failure evidence submitted", logtrace.Fields{ + "epoch_id": epochID, + "challenge_id": challengeID, + "recipient_id": recipient, + "failure_type": failureType, + }) + return nil +} + +func deriveChallengeID(seed []byte, epochID uint64, fileKey, challenger, recipient string) string { + msg := []byte("sc:challenge:" + hex.EncodeToString(seed) + ":" + strconv.FormatUint(epochID, 10) + ":" + fileKey + ":" + challenger + ":" + recipient) + sum := blake3.Sum256(msg) + return hex.EncodeToString(sum[:]) +} + +func 
transcriptHash(challengeID string, req *supernode.GetSliceProofRequest, resp *supernode.GetSliceProofResponse, obs []*supernode.VerifySliceProofResponse, err error) string { + type t struct { + ChallengeID string `json:"challenge_id"` + Request *supernode.GetSliceProofRequest `json:"request,omitempty"` + Response *supernode.GetSliceProofResponse `json:"response,omitempty"` + Observers []*supernode.VerifySliceProofResponse `json:"observers,omitempty"` + Error string `json:"error,omitempty"` + } + out := t{ChallengeID: challengeID, Request: req, Response: resp, Observers: obs} + if err != nil { + out.Error = err.Error() + } + bz, _ := json.Marshal(out) + sum := blake3.Sum256(bz) + return hex.EncodeToString(sum[:]) +} + +func pickRecipientAndObservers(replicas []string, self string, observerCount int) (string, []string) { + out := make([]string, 0, maxInt(0, observerCount)) + recipient := "" + for _, r := range replicas { + if r == self { + continue + } + recipient = r + break + } + if recipient == "" { + return "", nil + } + for _, r := range replicas { + if r == self || r == recipient { + continue + } + out = append(out, r) + if observerCount > 0 && len(out) >= observerCount { + break + } + } + sort.Strings(out) + return recipient, out +} + +func containsString(list []string, v string) bool { + for _, s := range list { + if s == v { + return true + } + } + return false +} + +func (s *Service) candidateKeysLookbackDuration(ctx context.Context, params audittypes.Params) time.Duration { + epochs := params.ScCandidateKeysLookbackEpochs + if epochs == 0 { + epochs = 1 + } + + epochDuration, ok := s.estimateEpochDuration(ctx, params) + if !ok || epochDuration <= 0 { + // Fallback to legacy "1 epoch == 24h" assumption if we can't estimate block time. 
+ return time.Duration(epochs) * 24 * time.Hour + } + return time.Duration(epochs) * epochDuration +} + +func (s *Service) estimateEpochDuration(ctx context.Context, params audittypes.Params) (time.Duration, bool) { + if params.EpochLengthBlocks == 0 { + return 0, false + } + + latest, err := s.lumera.Node().GetLatestBlock(ctx) + if err != nil || latest == nil || latest.Block == nil { + return 0, false + } + latestHeight := latest.Block.Header.Height + latestTime := latest.Block.Header.Time + if latestHeight <= 1 { + return 0, false + } + + // Sample over the last N blocks to smooth variance. + const sampleBlocks int64 = 100 + n := sampleBlocks + if latestHeight <= sampleBlocks { + n = latestHeight - 1 + } + olderHeight := latestHeight - n + + older, err := s.lumera.Node().GetBlockByHeight(ctx, olderHeight) + if err != nil || older == nil || older.Block == nil { + return 0, false + } + olderTime := older.Block.Header.Time + + dt := latestTime.Sub(olderTime) + if dt <= 0 { + return 0, false + } + avgBlockTime := dt / time.Duration(n) + if avgBlockTime <= 0 { + return 0, false + } + + // Guard against wildly wrong clocks. 
+ if avgBlockTime > 2*time.Minute { + return 0, false + } + + // epoch_duration ~= epoch_length_blocks * avg_block_time + epochBlocks := params.EpochLengthBlocks + if epochBlocks > uint64(^uint64(0)>>1) { + return 0, false + } + return time.Duration(epochBlocks) * avgBlockTime, true +} + +func maxInt(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/supernode/supernode_metrics/reachability_active_probing_test.go b/supernode/supernode_metrics/reachability_active_probing_test.go index 530ac2fd..5f138fe9 100644 --- a/supernode/supernode_metrics/reachability_active_probing_test.go +++ b/supernode/supernode_metrics/reachability_active_probing_test.go @@ -15,6 +15,8 @@ import ( sntypes "github.com/LumeraProtocol/lumera/x/supernode/v1/types" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/action_msg" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit" + "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/audit_msg" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/auth" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/bank" "github.com/LumeraProtocol/supernode/v2/pkg/lumera/modules/node" @@ -405,6 +407,8 @@ type fakeLumeraClient struct { func (c *fakeLumeraClient) Auth() auth.Module { return nil } func (c *fakeLumeraClient) Action() action.Module { return nil } func (c *fakeLumeraClient) ActionMsg() action_msg.Module { return nil } +func (c *fakeLumeraClient) Audit() audit.Module { return nil } +func (c *fakeLumeraClient) AuditMsg() audit_msg.Module { return nil } func (c *fakeLumeraClient) SuperNode() supernode.Module { return c.snModule } func (c *fakeLumeraClient) SuperNodeMsg() supernode_msg.Module { return nil } func (c *fakeLumeraClient) Bank() bank.Module { return nil } diff --git a/supernode/transport/grpc/storage_challenge/handler.go b/supernode/transport/grpc/storage_challenge/handler.go new file mode 100644 index 
00000000..732790d4 --- /dev/null +++ b/supernode/transport/grpc/storage_challenge/handler.go @@ -0,0 +1,193 @@ +package storage_challenge + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "time" + + "github.com/LumeraProtocol/supernode/v2/gen/supernode" + "github.com/LumeraProtocol/supernode/v2/p2p" + "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" + "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" + "github.com/LumeraProtocol/supernode/v2/pkg/types" + "lukechampine.com/blake3" +) + +type Server struct { + supernode.UnimplementedStorageChallengeServiceServer + + identity string + p2p p2p.Client + store queries.LocalStoreInterface +} + +func NewServer(identity string, p2pClient p2p.Client, store queries.LocalStoreInterface) *Server { + return &Server{identity: identity, p2p: p2pClient, store: store} +} + +func (s *Server) GetSliceProof(ctx context.Context, req *supernode.GetSliceProofRequest) (*supernode.GetSliceProofResponse, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + + if req.FileKey == "" { + return &supernode.GetSliceProofResponse{ChallengeId: req.ChallengeId, EpochId: req.EpochId, Ok: false, Error: "file_key is required"}, nil + } + + data, err := s.p2p.Retrieve(ctx, req.FileKey, true) + if err != nil { + return &supernode.GetSliceProofResponse{ChallengeId: req.ChallengeId, EpochId: req.EpochId, FileKey: req.FileKey, RecipientId: s.identity, Ok: false, Error: err.Error()}, nil + } + if len(data) == 0 { + return &supernode.GetSliceProofResponse{ChallengeId: req.ChallengeId, EpochId: req.EpochId, FileKey: req.FileKey, RecipientId: s.identity, Ok: false, Error: "file not found"}, nil + } + + start := req.RequestedStart + end := req.RequestedEnd + if end <= start { + start = 0 + end = uint64(len(data)) + } + if start >= uint64(len(data)) { + start = 0 + } + if end > uint64(len(data)) { + end = uint64(len(data)) + } + if end < start { + end = start + } + + slice := make([]byte, int(end-start)) + 
copy(slice, data[start:end]) + sum := blake3.Sum256(slice) + proofHex := hex.EncodeToString(sum[:]) + + resp := &supernode.GetSliceProofResponse{ + ChallengeId: req.ChallengeId, + EpochId: req.EpochId, + FileKey: req.FileKey, + Start: start, + End: end, + RecipientId: s.identity, + Slice: slice, + ProofHashHex: proofHex, + Ok: true, + } + + s.persistRecipientProof(ctx, req, resp) + return resp, nil +} + +func (s *Server) VerifySliceProof(ctx context.Context, req *supernode.VerifySliceProofRequest) (*supernode.VerifySliceProofResponse, error) { + if req == nil { + return nil, fmt.Errorf("nil request") + } + + if req.ProofHashHex == "" { + return &supernode.VerifySliceProofResponse{ChallengeId: req.ChallengeId, EpochId: req.EpochId, ObserverId: s.identity, Ok: false, Error: "proof_hash_hex is required"}, nil + } + + sum := blake3.Sum256(req.Slice) + want := req.ProofHashHex + got := hex.EncodeToString(sum[:]) + ok := got == want + errStr := "" + if !ok { + errStr = fmt.Sprintf("proof mismatch: want=%s got=%s", want, got) + } + + resp := &supernode.VerifySliceProofResponse{ + ChallengeId: req.ChallengeId, + EpochId: req.EpochId, + ObserverId: s.identity, + Ok: ok, + Error: errStr, + } + s.persistObserverVerification(ctx, req, resp) + return resp, nil +} + +func (s *Server) persistRecipientProof(ctx context.Context, req *supernode.GetSliceProofRequest, resp *supernode.GetSliceProofResponse) { + if s.store == nil { + return + } + + challenge := types.MessageData{ + ChallengerID: req.ChallengerId, + RecipientID: req.RecipientId, + Observers: append([]string(nil), req.ObserverIds...), + Challenge: types.ChallengeData{ + FileHash: req.FileKey, + StartIndex: int(req.RequestedStart), + EndIndex: int(req.RequestedEnd), + Timestamp: time.Now().UTC(), + }, + } + challengeBz, _ := json.Marshal(challenge) + _ = s.store.InsertStorageChallengeMessage(types.StorageChallengeLogMessage{ + MessageType: int(types.ChallengeMessageType), + ChallengeID: req.ChallengeId, + Data: 
challengeBz, + Sender: s.identity, + SenderSignature: []byte{}, + }) + + response := types.MessageData{ + ChallengerID: req.ChallengerId, + RecipientID: req.RecipientId, + Observers: append([]string(nil), req.ObserverIds...), + Response: types.ResponseData{ + Hash: resp.ProofHashHex, + Timestamp: time.Now().UTC(), + }, + } + responseBz, _ := json.Marshal(response) + _ = s.store.InsertStorageChallengeMessage(types.StorageChallengeLogMessage{ + MessageType: int(types.ResponseMessageType), + ChallengeID: req.ChallengeId, + Data: responseBz, + Sender: s.identity, + SenderSignature: []byte{}, + }) + + logtrace.Debug(ctx, "storage challenge proof served", logtrace.Fields{ + "challenge_id": req.ChallengeId, + "file_key": req.FileKey, + "start": resp.Start, + "end": resp.End, + }) +} + +func (s *Server) persistObserverVerification(ctx context.Context, req *supernode.VerifySliceProofRequest, resp *supernode.VerifySliceProofResponse) { + if s.store == nil { + return + } + + eval := types.MessageData{ + ChallengerID: req.ChallengerId, + RecipientID: req.RecipientId, + Observers: []string{s.identity}, + ObserverEvaluation: types.ObserverEvaluationData{ + IsEvaluationResultOK: resp.Ok, + Reason: resp.Error, + TrueHash: req.ProofHashHex, + Timestamp: time.Now().UTC(), + }, + } + bz, _ := json.Marshal(eval) + _ = s.store.InsertStorageChallengeMessage(types.StorageChallengeLogMessage{ + MessageType: int(types.AffirmationMessageType), + ChallengeID: req.ChallengeId, + Data: bz, + Sender: s.identity, + SenderSignature: []byte{}, + }) + + logtrace.Debug(ctx, "storage challenge proof verified", logtrace.Fields{ + "challenge_id": req.ChallengeId, + "ok": resp.Ok, + }) +} From 95710fc7bf88e620c6a4caa6356ef8386c36e6fd Mon Sep 17 00:00:00 2001 From: j-rafique Date: Mon, 9 Feb 2026 20:36:11 +0500 Subject: [PATCH 2/8] audit : align with lumera changes --- pkg/lumera/Readme.md | 2 +- pkg/lumera/modules/audit/impl.go | 6 +- pkg/lumera/modules/audit/interface.go | 2 +- 
.../modules/audit_msg/audit_msg_mock.go | 12 ++-- pkg/lumera/modules/audit_msg/impl.go | 23 +++---- pkg/lumera/modules/audit_msg/interface.go | 2 +- pkg/testutil/lumera.go | 6 +- supernode/cmd/start.go | 9 +-- .../service.go | 67 ++++++++++++++----- supernode/storage_challenge/README.md | 2 - supernode/storage_challenge/service.go | 57 +++++++++++----- 11 files changed, 123 insertions(+), 65 deletions(-) rename supernode/{audit_reporter => host_reporter}/service.go (69%) diff --git a/pkg/lumera/Readme.md b/pkg/lumera/Readme.md index a636ccda..0da98989 100644 --- a/pkg/lumera/Readme.md +++ b/pkg/lumera/Readme.md @@ -26,7 +26,7 @@ Using modules - `cli.Action()` – query actions (GetAction, GetActionFee, GetParams) - `cli.ActionMsg()` – send action messages (see below) - `cli.Audit()` – query `x/audit` (params/epochs/anchors/assignments/reports) -- `cli.AuditMsg()` – submit `x/audit` txs (`MsgSubmitAuditReport`, `MsgSubmitEvidence`) +- `cli.AuditMsg()` – submit `x/audit` txs (`MsgSubmitEpochReport`, `MsgSubmitEvidence`) - `cli.Auth()` – accounts/verify - `cli.SuperNode()` – supernode queries - `cli.Tx()` – tx internals (shared by helpers) diff --git a/pkg/lumera/modules/audit/impl.go b/pkg/lumera/modules/audit/impl.go index 7ed1383f..06b26908 100644 --- a/pkg/lumera/modules/audit/impl.go +++ b/pkg/lumera/modules/audit/impl.go @@ -63,13 +63,13 @@ func (m *module) GetAssignedTargets(ctx context.Context, supernodeAccount string return resp, nil } -func (m *module) GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryAuditReportResponse, error) { - resp, err := m.client.AuditReport(ctx, &types.QueryAuditReportRequest{ +func (m *module) GetEpochReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryEpochReportResponse, error) { + resp, err := m.client.EpochReport(ctx, &types.QueryEpochReportRequest{ EpochId: epochID, SupernodeAccount: supernodeAccount, }) if err != nil { - return nil, fmt.Errorf("failed to get 
audit report: %w", err) + return nil, fmt.Errorf("failed to get epoch report: %w", err) } return resp, nil } diff --git a/pkg/lumera/modules/audit/interface.go b/pkg/lumera/modules/audit/interface.go index 9d9eb60d..a5ac2939 100644 --- a/pkg/lumera/modules/audit/interface.go +++ b/pkg/lumera/modules/audit/interface.go @@ -14,7 +14,7 @@ type Module interface { GetCurrentEpochAnchor(ctx context.Context) (*types.QueryCurrentEpochAnchorResponse, error) GetCurrentEpoch(ctx context.Context) (*types.QueryCurrentEpochResponse, error) GetAssignedTargets(ctx context.Context, supernodeAccount string, epochID uint64) (*types.QueryAssignedTargetsResponse, error) - GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryAuditReportResponse, error) + GetEpochReport(ctx context.Context, epochID uint64, supernodeAccount string) (*types.QueryEpochReportResponse, error) } // NewModule creates a new Audit module client. diff --git a/pkg/lumera/modules/audit_msg/audit_msg_mock.go b/pkg/lumera/modules/audit_msg/audit_msg_mock.go index 7ce8be97..354289b7 100644 --- a/pkg/lumera/modules/audit_msg/audit_msg_mock.go +++ b/pkg/lumera/modules/audit_msg/audit_msg_mock.go @@ -57,17 +57,17 @@ func (mr *MockModuleMockRecorder) SubmitEvidence(ctx, subjectAddress, evidenceTy return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitEvidence", reflect.TypeOf((*MockModule)(nil).SubmitEvidence), ctx, subjectAddress, evidenceType, actionID, metadataJSON) } -// SubmitAuditReport mocks base method. -func (m *MockModule) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*types.AuditPeerObservation) (*tx.BroadcastTxResponse, error) { +// SubmitEpochReport mocks base method. 
+func (m *MockModule) SubmitEpochReport(ctx context.Context, epochID uint64, hostReport types.HostReport, storageChallengeObservations []*types.StorageChallengeObservation) (*tx.BroadcastTxResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SubmitAuditReport", ctx, epochID, peerObservations) + ret := m.ctrl.Call(m, "SubmitEpochReport", ctx, epochID, hostReport, storageChallengeObservations) ret0, _ := ret[0].(*tx.BroadcastTxResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// SubmitAuditReport indicates an expected call of SubmitAuditReport. -func (mr *MockModuleMockRecorder) SubmitAuditReport(ctx, epochID, peerObservations any) *gomock.Call { +// SubmitEpochReport indicates an expected call of SubmitEpochReport. +func (mr *MockModuleMockRecorder) SubmitEpochReport(ctx, epochID, hostReport, storageChallengeObservations any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitAuditReport", reflect.TypeOf((*MockModule)(nil).SubmitAuditReport), ctx, epochID, peerObservations) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubmitEpochReport", reflect.TypeOf((*MockModule)(nil).SubmitEpochReport), ctx, epochID, hostReport, storageChallengeObservations) } diff --git a/pkg/lumera/modules/audit_msg/impl.go b/pkg/lumera/modules/audit_msg/impl.go index bf0fae46..855bc154 100644 --- a/pkg/lumera/modules/audit_msg/impl.go +++ b/pkg/lumera/modules/audit_msg/impl.go @@ -78,24 +78,23 @@ func (m *module) SubmitEvidence(ctx context.Context, subjectAddress string, evid }) } -func (m *module) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) { +func (m *module) SubmitEpochReport(ctx context.Context, epochID uint64, hostReport audittypes.HostReport, storageChallengeObservations []*audittypes.StorageChallengeObservation) (*sdktx.BroadcastTxResponse, error) { m.mu.Lock() defer m.mu.Unlock() - // Intentionally submit 0% 
usage for CPU/memory/disk so the chain treats these as "unknown" + // Intentionally submit 0% usage for CPU/memory so the chain treats these as "unknown" // (see x/audit enforcement semantics). - selfReport := audittypes.AuditSelfReport{ - CpuUsagePercent: 0, - MemUsagePercent: 0, - DiskUsagePercent: 0, - } + // + // Disk usage is expected to be reported accurately (legacy-aligned); callers provide it. + hostReport.CpuUsagePercent = 0 + hostReport.MemUsagePercent = 0 return m.txHelper.ExecuteTransaction(ctx, func(creator string) (sdktypes.Msg, error) { - return &audittypes.MsgSubmitAuditReport{ - SupernodeAccount: creator, - EpochId: epochID, - SelfReport: selfReport, - PeerObservations: peerObservations, + return &audittypes.MsgSubmitEpochReport{ + Creator: creator, + EpochId: epochID, + HostReport: hostReport, + StorageChallengeObservations: storageChallengeObservations, }, nil }) } diff --git a/pkg/lumera/modules/audit_msg/interface.go b/pkg/lumera/modules/audit_msg/interface.go index d8fe6a17..9ce586c4 100644 --- a/pkg/lumera/modules/audit_msg/interface.go +++ b/pkg/lumera/modules/audit_msg/interface.go @@ -15,7 +15,7 @@ import ( // Module defines the interface for audit-related transactions. type Module interface { SubmitEvidence(ctx context.Context, subjectAddress string, evidenceType audittypes.EvidenceType, actionID string, metadataJSON string) (*sdktx.BroadcastTxResponse, error) - SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) + SubmitEpochReport(ctx context.Context, epochID uint64, hostReport audittypes.HostReport, storageChallengeObservations []*audittypes.StorageChallengeObservation) (*sdktx.BroadcastTxResponse, error) } // NewModule creates a new audit_msg module instance. 
diff --git a/pkg/testutil/lumera.go b/pkg/testutil/lumera.go index c9d287d3..d7bd1212 100644 --- a/pkg/testutil/lumera.go +++ b/pkg/testutil/lumera.go @@ -213,8 +213,8 @@ func (m *MockAuditModule) GetAssignedTargets(ctx context.Context, supernodeAccou return &audittypes.QueryAssignedTargetsResponse{}, nil } -func (m *MockAuditModule) GetAuditReport(ctx context.Context, epochID uint64, supernodeAccount string) (*audittypes.QueryAuditReportResponse, error) { - return &audittypes.QueryAuditReportResponse{}, nil +func (m *MockAuditModule) GetEpochReport(ctx context.Context, epochID uint64, supernodeAccount string) (*audittypes.QueryEpochReportResponse, error) { + return &audittypes.QueryEpochReportResponse{}, nil } type MockAuditMsgModule struct{} @@ -223,7 +223,7 @@ func (m *MockAuditMsgModule) SubmitEvidence(ctx context.Context, subjectAddress return &sdktx.BroadcastTxResponse{}, nil } -func (m *MockAuditMsgModule) SubmitAuditReport(ctx context.Context, epochID uint64, peerObservations []*audittypes.AuditPeerObservation) (*sdktx.BroadcastTxResponse, error) { +func (m *MockAuditMsgModule) SubmitEpochReport(ctx context.Context, epochID uint64, hostReport audittypes.HostReport, storageChallengeObservations []*audittypes.StorageChallengeObservation) (*sdktx.BroadcastTxResponse, error) { return &sdktx.BroadcastTxResponse{}, nil } diff --git a/supernode/cmd/start.go b/supernode/cmd/start.go index 09f0d0f3..3e2a4855 100644 --- a/supernode/cmd/start.go +++ b/supernode/cmd/start.go @@ -21,9 +21,9 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/storage/queries" "github.com/LumeraProtocol/supernode/v2/pkg/storage/rqstore" "github.com/LumeraProtocol/supernode/v2/pkg/task" - auditReporterService "github.com/LumeraProtocol/supernode/v2/supernode/audit_reporter" cascadeService "github.com/LumeraProtocol/supernode/v2/supernode/cascade" "github.com/LumeraProtocol/supernode/v2/supernode/config" + hostReporterService 
"github.com/LumeraProtocol/supernode/v2/supernode/host_reporter" statusService "github.com/LumeraProtocol/supernode/v2/supernode/status" storageChallengeService "github.com/LumeraProtocol/supernode/v2/supernode/storage_challenge" // Legacy supernode metrics reporter (MsgReportSupernodeMetrics) has been superseded by @@ -162,14 +162,15 @@ The supernode will connect to the Lumera network and begin participating in the // Create supernode status service with injected tracker statusSvc := statusService.NewSupernodeStatusService(p2pService, lumeraClient, appConfig, tr) - auditReporter, err := auditReporterService.NewService( + hostReporter, err := hostReporterService.NewService( appConfig.SupernodeConfig.Identity, lumeraClient, kr, appConfig.SupernodeConfig.KeyName, + appConfig.BaseDir, ) if err != nil { - logtrace.Fatal(ctx, "Failed to initialize audit reporter", logtrace.Fields{"error": err.Error()}) + logtrace.Fatal(ctx, "Failed to initialize host reporter", logtrace.Fields{"error": err.Error()}) } // Legacy on-chain supernode metrics reporting has been superseded by `x/audit`. 
@@ -247,7 +248,7 @@ The supernode will connect to the Lumera network and begin participating in the // Start the services using the standard runner and capture exit servicesErr := make(chan error, 1) go func() { - services := []service{grpcServer, cService, p2pService, gatewayServer, auditReporter} + services := []service{grpcServer, cService, p2pService, gatewayServer, hostReporter} if storageChallengeRunner != nil { services = append(services, storageChallengeRunner) } diff --git a/supernode/audit_reporter/service.go b/supernode/host_reporter/service.go similarity index 69% rename from supernode/audit_reporter/service.go rename to supernode/host_reporter/service.go index b00f9395..40f0083c 100644 --- a/supernode/audit_reporter/service.go +++ b/supernode/host_reporter/service.go @@ -1,4 +1,4 @@ -package audit_reporter +package host_reporter import ( "context" @@ -12,6 +12,7 @@ import ( "github.com/LumeraProtocol/supernode/v2/pkg/logtrace" "github.com/LumeraProtocol/supernode/v2/pkg/lumera" "github.com/LumeraProtocol/supernode/v2/pkg/reachability" + statussvc "github.com/LumeraProtocol/supernode/v2/supernode/status" "github.com/cosmos/cosmos-sdk/crypto/keyring" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -24,7 +25,7 @@ const ( maxConcurrentTargets = 8 ) -// Service submits one MsgSubmitAuditReport per epoch for the local supernode. +// Service submits one MsgSubmitEpochReport per epoch for the local supernode. // All runtime behavior is driven by on-chain params/queries; there are no local config knobs. 
type Service struct { identity string @@ -35,9 +36,12 @@ type Service struct { pollInterval time.Duration dialTimeout time.Duration + + metrics *statussvc.MetricsCollector + storagePaths []string } -func NewService(identity string, lumeraClient lumera.Client, kr keyring.Keyring, keyName string) (*Service, error) { +func NewService(identity string, lumeraClient lumera.Client, kr keyring.Keyring, keyName string, baseDir string) (*Service, error) { identity = strings.TrimSpace(identity) if identity == "" { return nil, fmt.Errorf("identity is empty") @@ -66,6 +70,12 @@ func NewService(identity string, lumeraClient lumera.Client, kr keyring.Keyring, return nil, fmt.Errorf("identity mismatch: config.identity=%s key(%s)=%s", identity, keyName, got) } + storagePaths := []string{} + if baseDir = strings.TrimSpace(baseDir); baseDir != "" { + // Match legacy disk reporting behavior: measure the volume where the supernode stores its data. + storagePaths = []string{baseDir} + } + return &Service{ identity: identity, lumera: lumeraClient, @@ -73,6 +83,8 @@ func NewService(identity string, lumeraClient lumera.Client, kr keyring.Keyring, keyName: keyName, pollInterval: defaultPollInterval, dialTimeout: defaultDialTimeout, + metrics: statussvc.NewMetricsCollector(), + storagePaths: storagePaths, }, nil } @@ -105,7 +117,7 @@ func (s *Service) tick(ctx context.Context) { } // Idempotency: if a report exists for this epoch, do nothing. 
- if _, err := s.lumera.Audit().GetAuditReport(ctx, epochID, s.identity); err == nil { + if _, err := s.lumera.Audit().GetEpochReport(ctx, epochID, s.identity); err == nil { return } else if status.Code(err) != codes.NotFound { return @@ -116,28 +128,49 @@ func (s *Service) tick(ctx context.Context) { return } - peerObservations := s.buildPeerObservations(ctx, epochID, assignResp.RequiredOpenPorts, assignResp.TargetSupernodeAccounts) + storageChallengeObservations := s.buildStorageChallengeObservations(ctx, epochID, assignResp.RequiredOpenPorts, assignResp.TargetSupernodeAccounts) + + hostReport := audittypes.HostReport{ + // Intentionally submit 0% usage for CPU/memory so the chain treats these as "unknown". + // Disk usage is reported accurately (legacy-aligned) so disk-based enforcement can work. + CpuUsagePercent: 0, + MemUsagePercent: 0, + } + if diskUsagePercent, ok := s.diskUsagePercent(ctx); ok { + hostReport.DiskUsagePercent = diskUsagePercent + } - if _, err := s.lumera.AuditMsg().SubmitAuditReport(ctx, epochID, peerObservations); err != nil { - logtrace.Warn(ctx, "audit report submit failed", logtrace.Fields{ + if _, err := s.lumera.AuditMsg().SubmitEpochReport(ctx, epochID, hostReport, storageChallengeObservations); err != nil { + logtrace.Warn(ctx, "epoch report submit failed", logtrace.Fields{ "epoch_id": epochID, "error": err.Error(), }) return } - logtrace.Info(ctx, "audit report submitted", logtrace.Fields{ - "epoch_id": epochID, - "peer_observations_count": len(peerObservations), + logtrace.Info(ctx, "epoch report submitted", logtrace.Fields{ + "epoch_id": epochID, + "storage_challenge_observations_count": len(storageChallengeObservations), }) } -func (s *Service) buildPeerObservations(ctx context.Context, epochID uint64, requiredOpenPorts []uint32, targets []string) []*audittypes.AuditPeerObservation { +func (s *Service) diskUsagePercent(ctx context.Context) (float64, bool) { + if s.metrics == nil || len(s.storagePaths) == 0 { + return 0, false + 
} + infos := s.metrics.CollectStorageMetrics(ctx, s.storagePaths) + if len(infos) == 0 { + return 0, false + } + return infos[0].UsagePercent, true +} + +func (s *Service) buildStorageChallengeObservations(ctx context.Context, epochID uint64, requiredOpenPorts []uint32, targets []string) []*audittypes.StorageChallengeObservation { if len(targets) == 0 { return nil } - out := make([]*audittypes.AuditPeerObservation, len(targets)) + out := make([]*audittypes.StorageChallengeObservation, len(targets)) type workItem struct { index int @@ -171,8 +204,8 @@ func (s *Service) buildPeerObservations(ctx context.Context, epochID uint64, req <-done } - // ensure no nil elements (MsgSubmitAuditReport rejects nil observations) - final := make([]*audittypes.AuditPeerObservation, 0, len(out)) + // ensure no nil elements (MsgSubmitEpochReport rejects nil observations) + final := make([]*audittypes.StorageChallengeObservation, 0, len(out)) for i := range out { if out[i] != nil { final = append(final, out[i]) @@ -181,7 +214,7 @@ func (s *Service) buildPeerObservations(ctx context.Context, epochID uint64, req return final } -func (s *Service) observeTarget(ctx context.Context, epochID uint64, requiredOpenPorts []uint32, target string) *audittypes.AuditPeerObservation { +func (s *Service) observeTarget(ctx context.Context, epochID uint64, requiredOpenPorts []uint32, target string) *audittypes.StorageChallengeObservation { target = strings.TrimSpace(target) if target == "" { return nil @@ -189,7 +222,7 @@ func (s *Service) observeTarget(ctx context.Context, epochID uint64, requiredOpe host, err := s.targetHost(ctx, target) if err != nil { - logtrace.Warn(ctx, "audit observe target: resolve host failed", logtrace.Fields{ + logtrace.Warn(ctx, "storage challenge observe target: resolve host failed", logtrace.Fields{ "epoch_id": epochID, "target": target, "error": err.Error(), @@ -202,7 +235,7 @@ func (s *Service) observeTarget(ctx context.Context, epochID uint64, requiredOpe portStates = 
append(portStates, probeTCP(ctx, host, p, s.dialTimeout)) } - return &audittypes.AuditPeerObservation{ + return &audittypes.StorageChallengeObservation{ TargetSupernodeAccount: target, PortStates: portStates, } diff --git a/supernode/storage_challenge/README.md b/supernode/storage_challenge/README.md index 99d74a65..1d3f8afb 100644 --- a/supernode/storage_challenge/README.md +++ b/supernode/storage_challenge/README.md @@ -34,7 +34,6 @@ For each selected `file_key`: On failure conditions (recipient error/unreachable, invalid proof, observer quorum failure), and only when enabled: - Build `StorageChallengeFailureEvidenceMetadata` JSON. -- Enforce `sc_evidence_max_bytes` locally before submission. - Submit `MsgSubmitEvidence` to `x/audit` using the Supernode’s Cosmos key. Evidence is chain-verifiable at the policy/assignment level (epoch anchor + deterministic rules), while full transcripts remain @@ -48,4 +47,3 @@ off-chain (supernode logs and local stores). service retries on the next tick. - Candidate key lookback: lookback is computed as `lookback_epochs * estimated_epoch_duration`, where epoch duration is estimated from recent blocks. If estimation fails, it falls back to `24h * lookback_epochs`. - diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index 2dbdb145..cab7d30e 100644 --- a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -25,6 +25,36 @@ import ( "lukechampine.com/blake3" ) +// Storage challenge (SC) execution knobs are intentionally owned by the supernode binary, +// not by on-chain params. The chain only needs: +// - sc_enabled: feature gate for evidence acceptance/validation +// - sc_challengers_per_epoch: deterministic challenger selection size +// - epoch cadence: epoch_zero_height + epoch_length_blocks +const ( + // scStartJitterMs is a deterministic delay applied before running the epoch, to spread load. 
+ scStartJitterMs = uint64(60_000) // 60s + + // scFilesPerChallenger is how many local file keys each challenger attempts per epoch. + scFilesPerChallenger = uint32(2) + + // scReplicaCount is the size of the replica set considered for recipient/observer selection. + scReplicaCount = uint32(5) + + // scObserverThreshold is the quorum of observer affirmations required for a successful proof. + scObserverThreshold = uint32(2) + + // scMinSliceBytes/scMaxSliceBytes bound the requested proof slice range. + scMinSliceBytes = uint64(1024) + scMaxSliceBytes = uint64(65_536) + + // scResponseTimeout/scAffirmationTimeout are the gRPC timeouts for recipient proof and observer verification. + scResponseTimeout = 30 * time.Second + scAffirmationTimeout = 30 * time.Second + + // scCandidateKeysLookbackEpochs is how many epochs back we look for candidate local keys. + scCandidateKeysLookbackEpochs = uint32(1) +) + type Service struct { cfg Config identity string @@ -149,7 +179,7 @@ func (s *Service) Run(ctx context.Context) error { continue } - jitterMs := deterministic.DeterministicJitterMs(anchor.Seed, epochID, s.identity, params.ScStartJitterMs) + jitterMs := deterministic.DeterministicJitterMs(anchor.Seed, epochID, s.identity, scStartJitterMs) if jitterMs > 0 { timer := time.NewTimer(time.Duration(jitterMs) * time.Millisecond) select { @@ -234,7 +264,7 @@ func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, p } sort.Strings(keys) - fileKeys := deterministic.SelectFileKeys(keys, anchor.Seed, epochID, s.identity, params.ScFilesPerChallenger) + fileKeys := deterministic.SelectFileKeys(keys, anchor.Seed, epochID, s.identity, scFilesPerChallenger) if len(fileKeys) == 0 { return nil } @@ -255,12 +285,12 @@ func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, p func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, fileKey string) error { epochID := anchor.EpochId - 
replicas, err := deterministic.SelectReplicaSet(anchor.ActiveSupernodeAccounts, fileKey, params.ScReplicaCount) + replicas, err := deterministic.SelectReplicaSet(anchor.ActiveSupernodeAccounts, fileKey, scReplicaCount) if err != nil { return err } - recipient, observers := pickRecipientAndObservers(replicas, s.identity, int(params.ScObserverThreshold)) + recipient, observers := pickRecipientAndObservers(replicas, s.identity, int(scObserverThreshold)) if recipient == "" { return nil } @@ -283,10 +313,13 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo } requestedStart := uint64(0) - requestedEnd := params.ScMinSliceBytes + requestedEnd := scMinSliceBytes if requestedEnd == 0 { requestedEnd = 1024 } + if scMaxSliceBytes > 0 && requestedEnd > scMaxSliceBytes { + requestedEnd = scMaxSliceBytes + } challengeID := deriveChallengeID(anchor.Seed, epochID, fileKey, s.identity, recipient) @@ -302,7 +335,7 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo ObserverIds: append([]string(nil), observers...), } - resp, err := s.callGetSliceProof(ctx, recipientAddr, req, time.Duration(params.ScResponseTimeoutMs)*time.Millisecond) + resp, err := s.callGetSliceProof(ctx, recipientAddr, req, scResponseTimeout) if err != nil || resp == nil || !resp.Ok { failure := "RECIPIENT_ERROR" if err != nil { @@ -317,7 +350,7 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo } okCount := 0 - required := int(params.ScObserverThreshold) + required := int(scObserverThreshold) if required <= 0 { required = 0 } @@ -336,10 +369,7 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo RecipientId: recipient, } - timeout := time.Duration(params.ScAffirmationTimeoutMs) * time.Millisecond - if timeout <= 0 { - timeout = 30 * time.Second - } + timeout := scAffirmationTimeout for _, peer := range observerPeers { vr, verr := s.callVerifySliceProof(ctx, peer.addr, verifyReq, 
timeout) @@ -424,9 +454,6 @@ func (s *Service) maybeSubmitEvidence(ctx context.Context, params audittypes.Par if err != nil { return err } - if params.ScEvidenceMaxBytes > 0 && uint64(len(bz)) > params.ScEvidenceMaxBytes { - return fmt.Errorf("evidence metadata too large: %d > %d", len(bz), params.ScEvidenceMaxBytes) - } _, err = s.lumera.AuditMsg().SubmitEvidence(ctx, recipient, audittypes.EvidenceType_EVIDENCE_TYPE_STORAGE_CHALLENGE_FAILURE, "", string(bz)) if err != nil { @@ -500,7 +527,7 @@ func containsString(list []string, v string) bool { } func (s *Service) candidateKeysLookbackDuration(ctx context.Context, params audittypes.Params) time.Duration { - epochs := params.ScCandidateKeysLookbackEpochs + epochs := scCandidateKeysLookbackEpochs if epochs == 0 { epochs = 1 } From 411170dc268d14852ccf64ed1eed927dc7985229 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 11 Feb 2026 04:05:45 +0500 Subject: [PATCH 3/8] Fix storage challenge address parsing --- supernode/storage_challenge/service.go | 75 ++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 10 deletions(-) diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index cab7d30e..5b0198af 100644 --- a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "net" + "net/url" "sort" "strconv" "strings" @@ -229,10 +230,10 @@ func (s *Service) initClients(ctx context.Context) error { func (s *Service) latestHeight(ctx context.Context) (int64, bool) { resp, err := s.lumera.Node().GetLatestBlock(ctx) - if err != nil || resp == nil || resp.Block == nil { + if err != nil || resp == nil || resp.SdkBlock == nil { return 0, false } - return resp.Block.Header.Height, true + return resp.SdkBlock.Header.Height, true } func (s *Service) auditParams(ctx context.Context) (audittypes.Params, bool) { @@ -401,11 +402,49 @@ func (s *Service) supernodeGRPCAddr(ctx context.Context, supernodeAccount string if 
err != nil || info == nil { return "", fmt.Errorf("resolve supernode address: %w", err) } - host := strings.TrimSpace(info.LatestAddress) - if host == "" { + raw := strings.TrimSpace(info.LatestAddress) + if raw == "" { return "", fmt.Errorf("no ip address for supernode %s", supernodeAccount) } - return net.JoinHostPort(host, fmt.Sprintf("%d", s.grpcPort)), nil + + // The chain stores the supernode's reachable endpoint. Historically this has often been + // registered as "host:port" (e.g. "1.2.3.4:4444"). Storage challenge must tolerate + // both forms: + // - "host" -> use our configured default gRPC port + // - "host:port" -> use the stored port as the dial target + host, port, ok := parseHostAndPort(raw, int(s.grpcPort)) + if !ok || strings.TrimSpace(host) == "" { + return "", fmt.Errorf("invalid supernode address for %s: %q", supernodeAccount, raw) + } + return net.JoinHostPort(strings.TrimSpace(host), strconv.Itoa(port)), nil +} + +// parseHostAndPort parses a "host" or "host:port" string and returns a host and port. +// If a port is not present, defaultPort is returned. If a port is present but invalid, defaultPort is used instead. +func parseHostAndPort(address string, defaultPort int) (host string, port int, ok bool) { + address = strings.TrimSpace(address) + if address == "" { + return "", 0, false + } + + // If it looks like a URL, parse and use the host[:port] portion. + if u, err := url.Parse(address); err == nil && u.Host != "" { + address = u.Host + } + + if h, p, err := net.SplitHostPort(address); err == nil { + h = strings.TrimSpace(h) + if h == "" { + return "", 0, false + } + if n, err := strconv.Atoi(p); err == nil && n > 0 && n <= 65535 { + return h, n, true + } + return h, defaultPort, true + } + + // No port present; return default. 
+ return address, defaultPort, true } func (s *Service) callGetSliceProof(ctx context.Context, address string, req *supernode.GetSliceProofRequest, timeout time.Duration) (*supernode.GetSliceProofResponse, error) { @@ -546,11 +585,20 @@ func (s *Service) estimateEpochDuration(ctx context.Context, params audittypes.P } latest, err := s.lumera.Node().GetLatestBlock(ctx) - if err != nil || latest == nil || latest.Block == nil { + if err != nil || latest == nil { + return 0, false + } + var latestHeight int64 + var latestTime time.Time + if sdkBlk := latest.GetSdkBlock(); sdkBlk != nil { + latestHeight = sdkBlk.Header.Height + latestTime = sdkBlk.Header.Time + } else if blk := latest.GetBlock(); blk != nil { + latestHeight = blk.Header.Height + latestTime = blk.Header.Time + } else { return 0, false } - latestHeight := latest.Block.Header.Height - latestTime := latest.Block.Header.Time if latestHeight <= 1 { return 0, false } @@ -564,10 +612,17 @@ func (s *Service) estimateEpochDuration(ctx context.Context, params audittypes.P olderHeight := latestHeight - n older, err := s.lumera.Node().GetBlockByHeight(ctx, olderHeight) - if err != nil || older == nil || older.Block == nil { + if err != nil || older == nil { + return 0, false + } + var olderTime time.Time + if sdkBlk := older.GetSdkBlock(); sdkBlk != nil { + olderTime = sdkBlk.Header.Time + } else if blk := older.GetBlock(); blk != nil { + olderTime = blk.Header.Time + } else { return 0, false } - olderTime := older.Block.Header.Time dt := latestTime.Sub(olderTime) if dt <= 0 { From 1798915f86c0300fa1eca9519e899fcf44c0f759 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 11 Feb 2026 20:20:41 +0500 Subject: [PATCH 4/8] Update go mod --- .github/actions/setup-env/action.yml | 4 ++-- cmd/sncli/go.mod | 2 +- cmd/sncli/go.sum | 4 ++-- go.mod | 6 +++--- go.sum | 2 -- sn-manager/go.mod | 2 +- sn-manager/go.sum | 4 ++-- tests/system/go.mod | 2 +- tests/system/go.sum | 4 ++-- 9 files changed, 14 insertions(+), 16 
deletions(-) diff --git a/.github/actions/setup-env/action.yml b/.github/actions/setup-env/action.yml index d8d6e1f4..a8d030a1 100644 --- a/.github/actions/setup-env/action.yml +++ b/.github/actions/setup-env/action.yml @@ -37,10 +37,10 @@ runs: if: ${{ inputs.bust_lumera_retag == 'true' }} shell: bash run: | - echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.8.0 (one-time)" + echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.11.0-rc (one-time)" # Remove stale checksums in all local modules find . -name 'go.sum' -maxdepth 3 -print0 | xargs -0 -I{} sed -i \ - '/github.com\/LumeraProtocol\/lumera v1.8.0/d' {} + '/github.com\/LumeraProtocol\/lumera v1.11.0-rc/d' {} # Clear module/build caches to avoid cached zips go clean -modcache || true rm -rf "$(go env GOCACHE)" || true diff --git a/cmd/sncli/go.mod b/cmd/sncli/go.mod index e7d4b8ac..6fc6fb86 100644 --- a/cmd/sncli/go.mod +++ b/cmd/sncli/go.mod @@ -11,7 +11,7 @@ replace ( require ( github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c - github.com/LumeraProtocol/lumera v1.10.0 + github.com/LumeraProtocol/lumera v1.11.0-rc github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 github.com/cosmos/cosmos-sdk v0.53.5 github.com/spf13/cobra v1.10.1 diff --git a/cmd/sncli/go.sum b/cmd/sncli/go.sum index a1d658bb..4cc94260 100644 --- a/cmd/sncli/go.sum +++ b/cmd/sncli/go.sum @@ -111,8 +111,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.10.0 
h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= -github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= +github.com/LumeraProtocol/lumera v1.11.0-rc h1:ISJLUhjihuOterLMHpgGWpMZmybR1vmQLNgmSHkc1WA= +github.com/LumeraProtocol/lumera v1.11.0-rc/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= diff --git a/go.mod b/go.mod index c2ad879c..f70095da 100644 --- a/go.mod +++ b/go.mod @@ -3,18 +3,18 @@ module github.com/LumeraProtocol/supernode/v2 go 1.25.5 replace ( + // Local development (monorepo): use local Lumera module. + github.com/LumeraProtocol/lumera => ../lumera github.com/envoyproxy/protoc-gen-validate => github.com/bufbuild/protoc-gen-validate v1.3.0 github.com/lyft/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.3.0 nhooyr.io/websocket => github.com/coder/websocket v1.8.7 - // Local development (monorepo): use local Lumera module. 
- github.com/LumeraProtocol/lumera => ../lumera ) require ( cosmossdk.io/math v1.5.3 github.com/AlecAivazis/survey/v2 v2.3.7 github.com/DataDog/zstd v1.5.7 - github.com/LumeraProtocol/lumera v1.10.0 + github.com/LumeraProtocol/lumera v1.11.0-rc github.com/LumeraProtocol/rq-go v0.2.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/cenkalti/backoff/v4 v4.3.0 diff --git a/go.sum b/go.sum index 092b6003..f3d054b5 100644 --- a/go.sum +++ b/go.sum @@ -111,8 +111,6 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.10.0 h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= -github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= diff --git a/sn-manager/go.mod b/sn-manager/go.mod index b5635cf5..f1277575 100644 --- a/sn-manager/go.mod +++ b/sn-manager/go.mod @@ -33,7 +33,7 @@ require ( github.com/99designs/keyring v1.2.2 // indirect github.com/DataDog/datadog-go v4.8.3+incompatible // indirect github.com/DataDog/zstd v1.5.7 // indirect - github.com/LumeraProtocol/lumera v1.10.0 // indirect + github.com/LumeraProtocol/lumera v1.11.0-rc // indirect github.com/Masterminds/semver/v3 v3.3.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect 
github.com/beorn7/perks v1.0.1 // indirect diff --git a/sn-manager/go.sum b/sn-manager/go.sum index 11712337..ad728e77 100644 --- a/sn-manager/go.sum +++ b/sn-manager/go.sum @@ -109,8 +109,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.10.0 h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= -github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= +github.com/LumeraProtocol/lumera v1.11.0-rc h1:ISJLUhjihuOterLMHpgGWpMZmybR1vmQLNgmSHkc1WA= +github.com/LumeraProtocol/lumera v1.11.0-rc/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= diff --git a/tests/system/go.mod b/tests/system/go.mod index 5520320f..c5daeaf7 100644 --- a/tests/system/go.mod +++ b/tests/system/go.mod @@ -11,7 +11,7 @@ replace ( require ( cosmossdk.io/math v1.5.3 - github.com/LumeraProtocol/lumera v1.10.0 + github.com/LumeraProtocol/lumera v1.11.0-rc github.com/LumeraProtocol/supernode/v2 v2.0.0-00010101000000-000000000000 github.com/cometbft/cometbft v0.38.20 github.com/cosmos/ibc-go/v10 v10.5.0 diff --git a/tests/system/go.sum b/tests/system/go.sum index 0476017a..3378ef5e 100644 --- a/tests/system/go.sum +++ b/tests/system/go.sum @@ -107,8 +107,8 @@ 
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/LumeraProtocol/lumera v1.10.0 h1:IIuvqlFNUPoSkTJ3DoKDNHtr3E0+8GmE4CiNbgTzI2s= -github.com/LumeraProtocol/lumera v1.10.0/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= +github.com/LumeraProtocol/lumera v1.11.0-rc h1:ISJLUhjihuOterLMHpgGWpMZmybR1vmQLNgmSHkc1WA= +github.com/LumeraProtocol/lumera v1.11.0-rc/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= From 7311b4a928dde7963925ac0125976f5ba2e6a1d8 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 11 Feb 2026 21:25:46 +0500 Subject: [PATCH 5/8] Go mod fixes + sdk evidence submission --- .github/actions/setup-env/action.yml | 32 +++++++-------- .github/workflows/build&release.yml | 8 ++-- .github/workflows/tests.yml | 12 +++--- go.mod | 2 - go.sum | 2 + sdk/adapters/lumera/adapter.go | 44 ++++++++++++++++++++ sdk/task/cascade.go | 7 ++++ sdk/task/download.go | 7 ++++ sdk/task/evidence.go | 60 ++++++++++++++++++++++++++++ 9 files changed, 146 insertions(+), 28 deletions(-) create mode 100644 sdk/task/evidence.go diff --git a/.github/actions/setup-env/action.yml b/.github/actions/setup-env/action.yml index a8d030a1..41e49b25 100644 --- a/.github/actions/setup-env/action.yml +++ b/.github/actions/setup-env/action.yml @@ -1,11 +1,11 @@ 
name: Setup Environment description: Sets up Go (dynamically from go.mod) and installs system dependencies -inputs: - bust_lumera_retag: - description: "One-time: remove lumera sums after retag" - required: false - default: 'false' +inputs: {} +# bust_lumera_retag: +# description: "One-time: remove lumera sums after retag" +# required: false +# default: 'false' outputs: go-version: description: "Go version parsed from go.mod" @@ -33,17 +33,17 @@ runs: sudo apt-get update sudo apt-get install -y libwebp-dev make - - name: One-time reset retagged lumera checksums - if: ${{ inputs.bust_lumera_retag == 'true' }} - shell: bash - run: | - echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.11.0-rc (one-time)" - # Remove stale checksums in all local modules - find . -name 'go.sum' -maxdepth 3 -print0 | xargs -0 -I{} sed -i \ - '/github.com\/LumeraProtocol\/lumera v1.11.0-rc/d' {} - # Clear module/build caches to avoid cached zips - go clean -modcache || true - rm -rf "$(go env GOCACHE)" || true + # - name: One-time reset retagged lumera checksums + # if: ${{ inputs.bust_lumera_retag == 'true' }} + # shell: bash + # run: | + # echo "Busting go.sum entries for github.com/LumeraProtocol/lumera v1.11.0-rc (one-time)" + # # Remove stale checksums in all local modules + # find . 
-name 'go.sum' -maxdepth 3 -print0 | xargs -0 -I{} sed -i \ + # '/github.com\/LumeraProtocol\/lumera v1.11.0-rc/d' {} + # # Clear module/build caches to avoid cached zips + # go clean -modcache || true + # rm -rf "$(go env GOCACHE)" || true - name: Set Go Private Modules shell: bash diff --git a/.github/workflows/build&release.yml b/.github/workflows/build&release.yml index 94e9b9ba..1f8f7d66 100644 --- a/.github/workflows/build&release.yml +++ b/.github/workflows/build&release.yml @@ -27,8 +27,8 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env - with: - bust_lumera_retag: 'true' + # with: + # bust_lumera_retag: 'true' - name: Build binaries run: | @@ -74,8 +74,8 @@ jobs: - name: Setup Go and dependencies uses: ./.github/actions/setup-env - with: - bust_lumera_retag: 'true' + # with: + # bust_lumera_retag: 'true' - name: Prepare Release Variables id: vars diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e79c3b82..26796204 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,8 +17,8 @@ jobs: uses: actions/checkout@v6.0.1 - name: Setup Go and system deps uses: ./.github/actions/setup-env - with: - bust_lumera_retag: 'true' + # with: + # bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy @@ -35,8 +35,8 @@ jobs: - name: Setup Go and system deps uses: ./.github/actions/setup-env - with: - bust_lumera_retag: 'true' + # with: + # bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy @@ -54,8 +54,8 @@ jobs: - name: Setup Go and system deps uses: ./.github/actions/setup-env - with: - bust_lumera_retag: 'true' + # with: + # bust_lumera_retag: 'true' - name: Go mod tidy run: go mod tidy diff --git a/go.mod b/go.mod index f70095da..f11b6800 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,6 @@ module github.com/LumeraProtocol/supernode/v2 go 1.25.5 replace ( - // Local development (monorepo): use local Lumera module. 
- github.com/LumeraProtocol/lumera => ../lumera github.com/envoyproxy/protoc-gen-validate => github.com/bufbuild/protoc-gen-validate v1.3.0 github.com/lyft/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.3.0 nhooyr.io/websocket => github.com/coder/websocket v1.8.7 diff --git a/go.sum b/go.sum index f3d054b5..a6c9eaa4 100644 --- a/go.sum +++ b/go.sum @@ -111,6 +111,8 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0 h1:ig/FpDD2JofP/NExKQUbn7uOSZzJAQqogfqluZK4ed4= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.50.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/LumeraProtocol/lumera v1.11.0-rc h1:ISJLUhjihuOterLMHpgGWpMZmybR1vmQLNgmSHkc1WA= +github.com/LumeraProtocol/lumera v1.11.0-rc/go.mod h1:p2sZZG3bLzSBdaW883qjuU3DXXY4NJzTTwLywr8uI0w= github.com/LumeraProtocol/rq-go v0.2.1 h1:8B3UzRChLsGMmvZ+UVbJsJj6JZzL9P9iYxbdUwGsQI4= github.com/LumeraProtocol/rq-go v0.2.1/go.mod h1:APnKCZRh1Es2Vtrd2w4kCLgAyaL5Bqrkz/BURoRJ+O8= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= diff --git a/sdk/adapters/lumera/adapter.go b/sdk/adapters/lumera/adapter.go index 7b40ddf7..e7b1e678 100644 --- a/sdk/adapters/lumera/adapter.go +++ b/sdk/adapters/lumera/adapter.go @@ -2,11 +2,13 @@ package lumera import ( "context" + "encoding/json" "fmt" "sort" "strings" "time" + audittypes "github.com/LumeraProtocol/lumera/x/audit/v1/types" "github.com/LumeraProtocol/supernode/v2/sdk/log" actiontypes "github.com/LumeraProtocol/lumera/x/action/v1/types" @@ -381,6 +383,48 @@ func (a *Adapter) GetBalance(ctx context.Context, address string, denom string) return resp, nil } +// SubmitCascadeClientFailureEvidence submits client-observed 
cascade failure evidence to x/audit. +func (a *Adapter) SubmitCascadeClientFailureEvidence( + ctx context.Context, + subjectAddress string, + actionID string, + targetSupernodeAccounts []string, + details map[string]string, +) error { + if a.client == nil { + return fmt.Errorf("lumera client is nil") + } + subjectAddress = strings.TrimSpace(subjectAddress) + if subjectAddress == "" { + return fmt.Errorf("subject address cannot be empty") + } + if details == nil { + details = map[string]string{} + } + + meta := audittypes.CascadeClientFailureEvidenceMetadata{ + ReporterComponent: audittypes.CascadeClientFailureReporterComponent_CASCADE_CLIENT_FAILURE_REPORTER_COMPONENT_SDK_GO, + TargetSupernodeAccounts: append([]string(nil), targetSupernodeAccounts...), + Details: details, + } + bz, err := json.Marshal(meta) + if err != nil { + return fmt.Errorf("marshal cascade client failure evidence metadata: %w", err) + } + + _, err = a.client.AuditMsg().SubmitEvidence( + ctx, + subjectAddress, + audittypes.EvidenceType_EVIDENCE_TYPE_CASCADE_CLIENT_FAILURE, + actionID, + string(bz), + ) + if err != nil { + return fmt.Errorf("submit cascade client failure evidence: %w", err) + } + return nil +} + // DecodeCascadeMetadata decodes the raw metadata bytes into CascadeMetadata func (a *Adapter) DecodeCascadeMetadata(ctx context.Context, action Action) (actiontypes.CascadeMetadata, error) { if action.ActionType != "ACTION_TYPE_CASCADE" { diff --git a/sdk/task/cascade.go b/sdk/task/cascade.go index 2a5a8f5a..5fe7315c 100644 --- a/sdk/task/cascade.go +++ b/sdk/task/cascade.go @@ -139,6 +139,13 @@ func (t *CascadeTask) registerWithSupernodes(ctx context.Context, supernodes lum event.KeyIteration: iteration, event.KeyError: err.Error(), }) + t.submitCascadeClientFailureEvidence(ctx, sn.CosmosAddress, []string{sn.CosmosAddress}, map[string]string{ + "operation": "register", + "iteration": fmt.Sprintf("%d", iteration), + "supernode_endpoint": sn.GrpcEndpoint, + "supernode_account": 
sn.CosmosAddress, + "error": err.Error(), + }) lastErr = err continue } diff --git a/sdk/task/download.go b/sdk/task/download.go index 98e0578e..20187312 100644 --- a/sdk/task/download.go +++ b/sdk/task/download.go @@ -131,6 +131,13 @@ func (t *CascadeDownloadTask) downloadFromSupernodes(ctx context.Context, supern event.KeyIteration: iteration, event.KeyError: err.Error(), }) + t.submitCascadeClientFailureEvidence(ctx, sn.CosmosAddress, []string{sn.CosmosAddress}, map[string]string{ + "operation": "download", + "iteration": fmt.Sprintf("%d", iteration), + "supernode_endpoint": sn.GrpcEndpoint, + "supernode_account": sn.CosmosAddress, + "error": err.Error(), + }) lastErr = err continue } diff --git a/sdk/task/evidence.go b/sdk/task/evidence.go new file mode 100644 index 00000000..e6a397c5 --- /dev/null +++ b/sdk/task/evidence.go @@ -0,0 +1,60 @@ +package task + +import ( + "context" + "strings" +) + +// Optional interface so existing test doubles that only implement the base +// sdk/adapters/lumera.Client interface remain valid. 
+type cascadeClientFailureEvidenceSubmitter interface { + SubmitCascadeClientFailureEvidence( + ctx context.Context, + subjectAddress string, + actionID string, + targetSupernodeAccounts []string, + details map[string]string, + ) error +} + +func (t *BaseTask) submitCascadeClientFailureEvidence( + ctx context.Context, + subjectAddress string, + targetSupernodeAccounts []string, + details map[string]string, +) { + subjectAddress = strings.TrimSpace(subjectAddress) + if subjectAddress == "" { + return + } + + submitter, ok := any(t.client).(cascadeClientFailureEvidenceSubmitter) + if !ok { + t.logger.Debug(ctx, "Cascade client failure evidence submitter not configured") + return + } + + if details == nil { + details = map[string]string{} + } + if _, exists := details["task_id"]; !exists { + details["task_id"] = t.TaskID + } + if _, exists := details["action_id"]; !exists { + details["action_id"] = t.ActionID + } + + if err := submitter.SubmitCascadeClientFailureEvidence( + ctx, + subjectAddress, + t.ActionID, + targetSupernodeAccounts, + details, + ); err != nil { + t.logger.Warn(ctx, "Failed to submit cascade client failure evidence", + "subject_address", subjectAddress, + "targets", targetSupernodeAccounts, + "error", err, + ) + } +} From b354613d2705ecc9231eb54c3af39258fe9405d4 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 11 Feb 2026 22:10:48 +0500 Subject: [PATCH 6/8] Fix handshake race and evidence flow --- .../alts/handshake/handshake_test.go | 16 ++++---- sdk/task/evidence.go | 39 +++++++++++++------ supernode/storage_challenge/service.go | 10 ++++- .../grpc/storage_challenge/handler.go | 3 +- 4 files changed, 45 insertions(+), 23 deletions(-) diff --git a/pkg/net/credentials/alts/handshake/handshake_test.go b/pkg/net/credentials/alts/handshake/handshake_test.go index 24981c29..ea130d67 100644 --- a/pkg/net/credentials/alts/handshake/handshake_test.go +++ b/pkg/net/credentials/alts/handshake/handshake_test.go @@ -211,9 +211,6 @@ func (h *hsInterceptor) 
cleanup() { } func TestHandshakerConcurrentHandshakes(t *testing.T) { - clientKr := CreateTestKeyring() - serverKr := CreateTestKeyring() - testCases := []struct { name string numHandshakes int @@ -307,11 +304,14 @@ func TestHandshakerConcurrentHandshakes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - // Create handshake pairs - for i := range tc.numHandshakes { - accountClient := fmt.Sprintf("client-%d", i) - accountServer := fmt.Sprintf("server-%d", i) - testAccounts := SetupTestAccounts(t, clientKr, []string{accountClient}) + // Create handshake pairs + for i := range tc.numHandshakes { + clientKr := CreateTestKeyring() + serverKr := CreateTestKeyring() + + accountClient := fmt.Sprintf("client-%d", i) + accountServer := fmt.Sprintf("server-%d", i) + testAccounts := SetupTestAccounts(t, clientKr, []string{accountClient}) clientAddr := testAccounts[0].Address testAccounts = SetupTestAccounts(t, serverKr, []string{accountServer}) diff --git a/sdk/task/evidence.go b/sdk/task/evidence.go index e6a397c5..a3600cb8 100644 --- a/sdk/task/evidence.go +++ b/sdk/task/evidence.go @@ -3,6 +3,7 @@ package task import ( "context" "strings" + "time" ) // Optional interface so existing test doubles that only implement the base @@ -17,6 +18,8 @@ type cascadeClientFailureEvidenceSubmitter interface { ) error } +const cascadeEvidenceSubmitTimeout = 10 * time.Second + func (t *BaseTask) submitCascadeClientFailureEvidence( ctx context.Context, subjectAddress string, @@ -44,17 +47,29 @@ func (t *BaseTask) submitCascadeClientFailureEvidence( details["action_id"] = t.ActionID } - if err := submitter.SubmitCascadeClientFailureEvidence( - ctx, - subjectAddress, - t.ActionID, - targetSupernodeAccounts, - details, - ); err != nil { - t.logger.Warn(ctx, "Failed to submit cascade client failure evidence", - "subject_address", subjectAddress, - "targets", targetSupernodeAccounts, - "error", err, - ) + targetsCopy := append([]string(nil), targetSupernodeAccounts...) 
+ detailsCopy := make(map[string]string, len(details)) + for k, v := range details { + detailsCopy[k] = v } + + // Evidence submission should not block retry loops. + go func(parent context.Context, subject string, actionID string, targets []string, metadata map[string]string) { + submitCtx, cancel := context.WithTimeout(context.WithoutCancel(parent), cascadeEvidenceSubmitTimeout) + defer cancel() + + if err := submitter.SubmitCascadeClientFailureEvidence( + submitCtx, + subject, + actionID, + targets, + metadata, + ); err != nil { + t.logger.Warn(submitCtx, "Failed to submit cascade client failure evidence", + "subject_address", subject, + "targets", targets, + "error", err, + ) + } + }(ctx, subjectAddress, t.ActionID, targetsCopy, detailsCopy) } diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index 5b0198af..2e868410 100644 --- a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -230,10 +230,16 @@ func (s *Service) initClients(ctx context.Context) error { func (s *Service) latestHeight(ctx context.Context) (int64, bool) { resp, err := s.lumera.Node().GetLatestBlock(ctx) - if err != nil || resp == nil || resp.SdkBlock == nil { + if err != nil || resp == nil { return 0, false } - return resp.SdkBlock.Header.Height, true + if sdkBlk := resp.GetSdkBlock(); sdkBlk != nil { + return sdkBlk.Header.Height, true + } + if blk := resp.GetBlock(); blk != nil { + return blk.Header.Height, true + } + return 0, false } func (s *Service) auditParams(ctx context.Context) (audittypes.Params, bool) { diff --git a/supernode/transport/grpc/storage_challenge/handler.go b/supernode/transport/grpc/storage_challenge/handler.go index 732790d4..2b70ba0f 100644 --- a/supernode/transport/grpc/storage_challenge/handler.go +++ b/supernode/transport/grpc/storage_challenge/handler.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "strings" "time" 
"github.com/LumeraProtocol/supernode/v2/gen/supernode" @@ -93,7 +94,7 @@ func (s *Server) VerifySliceProof(ctx context.Context, req *supernode.VerifySlic sum := blake3.Sum256(req.Slice) want := req.ProofHashHex got := hex.EncodeToString(sum[:]) - ok := got == want + ok := strings.EqualFold(got, want) errStr := "" if !ok { errStr = fmt.Sprintf("proof mismatch: want=%s got=%s", want, got) From 7f7aefc42e79f891c5fbbb055ce33bdad3a1260b Mon Sep 17 00:00:00 2001 From: j-rafique Date: Wed, 11 Feb 2026 22:19:42 +0500 Subject: [PATCH 7/8] fix indentations --- pkg/cascadekit/cascadekit_test.go | 2 +- pkg/cascadekit/ids.go | 2 +- .../credentials/alts/handshake/handshake_test.go | 16 ++++++++-------- supernode/supernode_metrics/gateway_url_test.go | 1 - .../securegrpc/secure_connection_test.go | 2 +- 5 files changed, 11 insertions(+), 12 deletions(-) diff --git a/pkg/cascadekit/cascadekit_test.go b/pkg/cascadekit/cascadekit_test.go index ce07785f..eb8e4f03 100644 --- a/pkg/cascadekit/cascadekit_test.go +++ b/pkg/cascadekit/cascadekit_test.go @@ -4,8 +4,8 @@ import ( "encoding/base64" "testing" - "github.com/LumeraProtocol/supernode/v2/pkg/codec" "github.com/DataDog/zstd" + "github.com/LumeraProtocol/supernode/v2/pkg/codec" ) func TestExtractIndexAndCreatorSig_Strict(t *testing.T) { diff --git a/pkg/cascadekit/ids.go b/pkg/cascadekit/ids.go index 098c5ce8..ac70d38b 100644 --- a/pkg/cascadekit/ids.go +++ b/pkg/cascadekit/ids.go @@ -4,10 +4,10 @@ import ( "bytes" "strconv" + "github.com/DataDog/zstd" "github.com/LumeraProtocol/supernode/v2/pkg/errors" "github.com/LumeraProtocol/supernode/v2/pkg/utils" "github.com/cosmos/btcutil/base58" - "github.com/DataDog/zstd" ) // GenerateLayoutIDs computes IDs for redundant layout files (not the final index IDs). 
diff --git a/pkg/net/credentials/alts/handshake/handshake_test.go b/pkg/net/credentials/alts/handshake/handshake_test.go index ea130d67..8bcb5cb2 100644 --- a/pkg/net/credentials/alts/handshake/handshake_test.go +++ b/pkg/net/credentials/alts/handshake/handshake_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - "go.uber.org/mock/gomock" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" lumeraidmocks "github.com/LumeraProtocol/lumera/x/lumeraid/mocks" "github.com/LumeraProtocol/lumera/x/lumeraid/securekeyx" @@ -304,14 +304,14 @@ func TestHandshakerConcurrentHandshakes(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - // Create handshake pairs - for i := range tc.numHandshakes { - clientKr := CreateTestKeyring() - serverKr := CreateTestKeyring() + // Create handshake pairs + for i := range tc.numHandshakes { + clientKr := CreateTestKeyring() + serverKr := CreateTestKeyring() - accountClient := fmt.Sprintf("client-%d", i) - accountServer := fmt.Sprintf("server-%d", i) - testAccounts := SetupTestAccounts(t, clientKr, []string{accountClient}) + accountClient := fmt.Sprintf("client-%d", i) + accountServer := fmt.Sprintf("server-%d", i) + testAccounts := SetupTestAccounts(t, clientKr, []string{accountClient}) clientAddr := testAccounts[0].Address testAccounts = SetupTestAccounts(t, serverKr, []string{accountServer}) diff --git a/supernode/supernode_metrics/gateway_url_test.go b/supernode/supernode_metrics/gateway_url_test.go index f9d4c0e4..024a2e7e 100644 --- a/supernode/supernode_metrics/gateway_url_test.go +++ b/supernode/supernode_metrics/gateway_url_test.go @@ -25,4 +25,3 @@ func TestGatewayStatusURL_IPv6BracketedHost(t *testing.T) { t.Fatalf("got %q want %q", got, want) } } - diff --git a/tests/integration/securegrpc/secure_connection_test.go b/tests/integration/securegrpc/secure_connection_test.go index 8ef89a90..fa0ab919 100644 --- a/tests/integration/securegrpc/secure_connection_test.go +++ 
b/tests/integration/securegrpc/secure_connection_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "go.uber.org/mock/gomock" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" From 5a09a0a4b2eb83a0fc1705816d0e37092d9237a3 Mon Sep 17 00:00:00 2001 From: j-rafique Date: Fri, 13 Feb 2026 17:33:18 +0500 Subject: [PATCH 8/8] Address formatting fix --- supernode/storage_challenge/service.go | 109 +++++++++++++++++++++---- 1 file changed, 93 insertions(+), 16 deletions(-) diff --git a/supernode/storage_challenge/service.go b/supernode/storage_challenge/service.go index 2e868410..330de748 100644 --- a/supernode/storage_challenge/service.go +++ b/supernode/storage_challenge/service.go @@ -128,15 +128,35 @@ func (s *Service) Run(ctx context.Context) error { return nil } - if err := s.initClients(ctx); err != nil { + if err := s.initClients(); err != nil { return err } + // Effective knobs (production defaults). Jitter is bounded by the epoch length + // to avoid sleeping past the epoch window on short epochs.
+ lookbackEpochs := scCandidateKeysLookbackEpochs + respTimeout := scResponseTimeout + affirmTimeout := scAffirmationTimeout + logtrace.Debug(ctx, "storage challenge runtime knobs", logtrace.Fields{ + "start_jitter_ms": scStartJitterMs, + "response_timeout_ms": respTimeout.Milliseconds(), + "affirmation_timeout_ms": affirmTimeout.Milliseconds(), + "submit_evidence_config": s.cfg.SubmitEvidence, + "poll_interval_ms": s.cfg.PollInterval.Milliseconds(), + "sc_files_per_challenger": scFilesPerChallenger, + "sc_replica_count": scReplicaCount, + "sc_observer_threshold": scObserverThreshold, + "sc_keys_lookback_epochs": lookbackEpochs, + }) + ticker := time.NewTicker(s.cfg.PollInterval) defer ticker.Stop() var lastRunEpoch uint64 var lastRunOK bool + var loggedAlreadyRanEpoch uint64 + var loggedNotSelectedEpoch uint64 + var loggedDisabledEpoch uint64 for { select { @@ -158,11 +178,19 @@ func (s *Service) Run(ctx context.Context) error { continue } if !params.ScEnabled { + if loggedDisabledEpoch != epochID { + logtrace.Debug(ctx, "storage challenge disabled by on-chain params", logtrace.Fields{"epoch_id": epochID}) + loggedDisabledEpoch = epochID + } lastRunEpoch = epochID lastRunOK = true continue } if lastRunOK && lastRunEpoch == epochID { + if loggedAlreadyRanEpoch != epochID { + logtrace.Debug(ctx, "storage challenge already ran this epoch; skipping", logtrace.Fields{"epoch_id": epochID}) + loggedAlreadyRanEpoch = epochID + } continue } @@ -175,13 +203,36 @@ func (s *Service) Run(ctx context.Context) error { challengers := deterministic.SelectChallengers(anchor.ActiveSupernodeAccounts, anchor.Seed, epochID, params.ScChallengersPerEpoch) if !containsString(challengers, s.identity) { + if loggedNotSelectedEpoch != epochID { + logtrace.Debug(ctx, "storage challenge: not selected challenger; skipping", logtrace.Fields{ + "epoch_id": epochID, + "identity": s.identity, + "selected": len(challengers), + "sc_param": params.ScChallengersPerEpoch, + }) + loggedNotSelectedEpoch = 
epochID + } lastRunEpoch = epochID lastRunOK = true continue } - jitterMs := deterministic.DeterministicJitterMs(anchor.Seed, epochID, s.identity, scStartJitterMs) + // Bound jitter by a conservative estimate of epoch duration (assume ~1s blocks). + // This is intentionally simple and is primarily to avoid sleeping past the epoch window. + jitterMaxMs := scStartJitterMs + epochBudgetMs := uint64(params.EpochLengthBlocks) * 1000 + if epochBudgetMs > 0 && epochBudgetMs/2 < jitterMaxMs { + jitterMaxMs = epochBudgetMs / 2 + } + + jitterMs := deterministic.DeterministicJitterMs(anchor.Seed, epochID, s.identity, jitterMaxMs) if jitterMs > 0 { + logtrace.Debug(ctx, "storage challenge jitter sleep", logtrace.Fields{ + "epoch_id": epochID, + "jitter_ms": jitterMs, + "jitter_max_ms": jitterMaxMs, + "challenger_id": s.identity, + }) timer := time.NewTimer(time.Duration(jitterMs) * time.Millisecond) select { case <-ctx.Done(): @@ -191,7 +242,7 @@ func (s *Service) Run(ctx context.Context) error { } } - if err := s.runEpoch(ctx, anchor, params); err != nil { + if err := s.runEpoch(ctx, anchor, params, lookbackEpochs, respTimeout, affirmTimeout); err != nil { logtrace.Warn(ctx, "storage challenge epoch run error", logtrace.Fields{ "epoch_id": epochID, "error": err.Error(), @@ -207,7 +258,7 @@ func (s *Service) Run(ctx context.Context) error { } } -func (s *Service) initClients(ctx context.Context) error { +func (s *Service) initClients() error { validator := lumera.NewSecureKeyExchangeValidator(s.lumera) grpcCreds, err := credentials.NewClientCreds(&credentials.ClientOptions{ @@ -254,10 +305,10 @@ func (s *Service) auditParams(ctx context.Context) (audittypes.Params, bool) { return p, true } -func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params) error { +func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, lookbackEpochs uint32, respTimeout time.Duration, affirmTimeout 
time.Duration) error { epochID := anchor.EpochId - lookback := s.candidateKeysLookbackDuration(ctx, params) + lookback := s.candidateKeysLookbackDuration(ctx, params, lookbackEpochs) to := time.Now().UTC() from := to.Add(-lookback) @@ -275,9 +326,15 @@ func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, p if len(fileKeys) == 0 { return nil } + logtrace.Debug(ctx, "storage challenge selected file keys", logtrace.Fields{ + "epoch_id": epochID, + "challenger_id": s.identity, + "keys_total": len(keys), + "file_keys": strings.Join(fileKeys, ","), + }) for _, fileKey := range fileKeys { - if err := s.runChallengeForFile(ctx, anchor, params, fileKey); err != nil { + if err := s.runChallengeForFile(ctx, anchor, params, fileKey, respTimeout, affirmTimeout); err != nil { logtrace.Warn(ctx, "storage challenge file run error", logtrace.Fields{ "epoch_id": epochID, "file_key": fileKey, @@ -289,7 +346,7 @@ func (s *Service) runEpoch(ctx context.Context, anchor audittypes.EpochAnchor, p return nil } -func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, fileKey string) error { +func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.EpochAnchor, params audittypes.Params, fileKey string, respTimeout time.Duration, affirmTimeout time.Duration) error { epochID := anchor.EpochId replicas, err := deterministic.SelectReplicaSet(anchor.ActiveSupernodeAccounts, fileKey, scReplicaCount) @@ -301,6 +358,13 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo if recipient == "" { return nil } + logtrace.Debug(ctx, "storage challenge selected recipient/observers", logtrace.Fields{ + "epoch_id": epochID, + "file_key": fileKey, + "challenger_id": s.identity, + "recipient_id": recipient, + "observers": strings.Join(observers, ","), + }) recipientAddr, err := s.supernodeGRPCAddr(ctx, recipient) if err != nil { @@ -342,7 +406,7 @@ func (s *Service) 
runChallengeForFile(ctx context.Context, anchor audittypes.Epo ObserverIds: append([]string(nil), observers...), } - resp, err := s.callGetSliceProof(ctx, recipientAddr, req, scResponseTimeout) + resp, err := s.callGetSliceProof(ctx, recipient, recipientAddr, req, respTimeout) if err != nil || resp == nil || !resp.Ok { failure := "RECIPIENT_ERROR" if err != nil { @@ -377,9 +441,12 @@ func (s *Service) runChallengeForFile(ctx context.Context, anchor audittypes.Epo } timeout := scAffirmationTimeout + if affirmTimeout > 0 { + timeout = affirmTimeout + } for _, peer := range observerPeers { - vr, verr := s.callVerifySliceProof(ctx, peer.addr, verifyReq, timeout) + vr, verr := s.callVerifySliceProof(ctx, peer.id, peer.addr, verifyReq, timeout) if verr == nil && vr != nil && vr.Ok { okCount++ } @@ -453,11 +520,13 @@ func parseHostAndPort(address string, defaultPort int) (host string, port int, o return address, defaultPort, true } -func (s *Service) callGetSliceProof(ctx context.Context, address string, req *supernode.GetSliceProofRequest, timeout time.Duration) (*supernode.GetSliceProofResponse, error) { +func (s *Service) callGetSliceProof(ctx context.Context, remoteIdentity string, address string, req *supernode.GetSliceProofRequest, timeout time.Duration) (*supernode.GetSliceProofResponse, error) { cctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - conn, err := s.grpcClient.Connect(cctx, address, s.grpcOpts) + // secure gRPC requires the peer identity in the dial target + // (format: "<identity>@<address>") so the handshake can authenticate the peer. 
+ conn, err := s.grpcClient.Connect(cctx, fmt.Sprintf("%s@%s", strings.TrimSpace(remoteIdentity), address), s.grpcOpts) if err != nil { return nil, err } @@ -467,11 +536,12 @@ func (s *Service) callGetSliceProof(ctx context.Context, address string, req *su return client.GetSliceProof(cctx, req) } -func (s *Service) callVerifySliceProof(ctx context.Context, address string, req *supernode.VerifySliceProofRequest, timeout time.Duration) (*supernode.VerifySliceProofResponse, error) { +func (s *Service) callVerifySliceProof(ctx context.Context, remoteIdentity string, address string, req *supernode.VerifySliceProofRequest, timeout time.Duration) (*supernode.VerifySliceProofResponse, error) { cctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - conn, err := s.grpcClient.Connect(cctx, address, s.grpcOpts) + // Production behavior: secure gRPC requires "<identity>@<address>" (see callGetSliceProof). + conn, err := s.grpcClient.Connect(cctx, fmt.Sprintf("%s@%s", strings.TrimSpace(remoteIdentity), address), s.grpcOpts) if err != nil { return nil, err } @@ -483,6 +553,14 @@ func (s *Service) callVerifySliceProof(ctx context.Context, address string, req func (s *Service) maybeSubmitEvidence(ctx context.Context, params audittypes.Params, epochID uint64, challengeID, fileKey, recipient, failureType, transcriptHashHex string) error { if !s.cfg.SubmitEvidence || !params.ScEnabled { + logtrace.Debug(ctx, "storage challenge: evidence submission skipped", logtrace.Fields{ + "epoch_id": epochID, + "challenge_id": challengeID, + "recipient_id": recipient, + "failure_type": failureType, + "submit_evidence_config": s.cfg.SubmitEvidence, + "sc_enabled_param": params.ScEnabled, + }) return nil } @@ -571,8 +649,7 @@ func containsString(list []string, v string) bool { return false } -func (s *Service) candidateKeysLookbackDuration(ctx context.Context, params audittypes.Params) time.Duration { - epochs := scCandidateKeysLookbackEpochs +func (s *Service) candidateKeysLookbackDuration(ctx 
context.Context, params audittypes.Params, epochs uint32) time.Duration { if epochs == 0 { epochs = 1 }