diff --git a/buf.gen.yaml b/buf.gen.yaml new file mode 100644 index 0000000..1fb3223 --- /dev/null +++ b/buf.gen.yaml @@ -0,0 +1,10 @@ +version: v2 +plugins: + - remote: buf.build/protocolbuffers/go + out: pkg/api/grpc/gen + opt: paths=source_relative + - remote: buf.build/grpc/go + out: pkg/api/grpc/gen + opt: paths=source_relative +inputs: + - directory: proto diff --git a/buf.yaml b/buf.yaml new file mode 100644 index 0000000..227c4a6 --- /dev/null +++ b/buf.yaml @@ -0,0 +1,9 @@ +version: v2 +modules: + - path: proto +lint: + use: + - DEFAULT +breaking: + use: + - FILE diff --git a/cmd/apex/main.go b/cmd/apex/main.go index f40fed4..7f0397e 100644 --- a/cmd/apex/main.go +++ b/cmd/apex/main.go @@ -4,18 +4,25 @@ import ( "context" "errors" "fmt" + "net" + "net/http" "os" "os/signal" "syscall" + "time" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/evstack/apex/config" + "github.com/evstack/apex/pkg/api" + grpcapi "github.com/evstack/apex/pkg/api/grpc" + jsonrpcapi "github.com/evstack/apex/pkg/api/jsonrpc" "github.com/evstack/apex/pkg/fetch" "github.com/evstack/apex/pkg/store" syncer "github.com/evstack/apex/pkg/sync" + "github.com/evstack/apex/pkg/types" ) // Set via ldflags at build time. @@ -148,20 +155,79 @@ func runIndexer(ctx context.Context, cfg *config.Config) error { } defer fetcher.Close() //nolint:errcheck - // Build and run the sync coordinator. + // Set up API layer. + notifier := api.NewNotifier(cfg.Subscription.BufferSize, log.Logger) + svc := api.NewService(db, fetcher, fetcher, notifier, log.Logger) + + // Build and run the sync coordinator with observer hook. 
coord := syncer.New(db, fetcher, syncer.WithStartHeight(cfg.Sync.StartHeight), syncer.WithBatchSize(cfg.Sync.BatchSize), syncer.WithConcurrency(cfg.Sync.Concurrency), syncer.WithLogger(log.Logger), + syncer.WithObserver(func(h uint64, hdr *types.Header, blobs []types.Blob) { + notifier.Publish(api.HeightEvent{Height: h, Header: hdr, Blobs: blobs}) + }), ) + // Start JSON-RPC server. + rpcServer := jsonrpcapi.NewServer(svc, log.Logger) + httpSrv := &http.Server{ + Addr: cfg.RPC.ListenAddr, + Handler: rpcServer, + ReadHeaderTimeout: 10 * time.Second, + } + + go func() { + log.Info().Str("addr", cfg.RPC.ListenAddr).Msg("JSON-RPC server listening") + if err := httpSrv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Error().Err(err).Msg("JSON-RPC server error") + } + }() + + // Start gRPC server. + grpcSrv := grpcapi.NewServer(svc, log.Logger) + lis, err := net.Listen("tcp", cfg.RPC.GRPCListenAddr) + if err != nil { + _ = httpSrv.Close() + return fmt.Errorf("listen gRPC: %w", err) + } + + go func() { + log.Info().Str("addr", cfg.RPC.GRPCListenAddr).Msg("gRPC server listening") + if err := grpcSrv.Serve(lis); err != nil { + log.Error().Err(err).Msg("gRPC server error") + } + }() + log.Info(). Int("namespaces", len(namespaces)). Uint64("start_height", cfg.Sync.StartHeight). Msg("sync coordinator starting") err = coord.Run(ctx) + + // Graceful shutdown. 
+ stopped := make(chan struct{}) + go func() { + grpcSrv.GracefulStop() + close(stopped) + }() + + grpcTimeout := time.After(5 * time.Second) + select { + case <-stopped: + case <-grpcTimeout: + log.Warn().Msg("gRPC graceful stop timed out, forcing stop") + grpcSrv.Stop() + } + + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer shutdownCancel() + if shutdownErr := httpSrv.Shutdown(shutdownCtx); shutdownErr != nil { + log.Error().Err(shutdownErr).Msg("JSON-RPC server shutdown error") + } + if err != nil && !errors.Is(err, context.Canceled) { return fmt.Errorf("coordinator: %w", err) } diff --git a/config/config.go b/config/config.go index d7f5dc7..60fe95b 100644 --- a/config/config.go +++ b/config/config.go @@ -8,11 +8,12 @@ import ( // Config is the top-level configuration for the Apex indexer. type Config struct { - DataSource DataSourceConfig `yaml:"data_source"` - Storage StorageConfig `yaml:"storage"` - RPC RPCConfig `yaml:"rpc"` - Sync SyncConfig `yaml:"sync"` - Log LogConfig `yaml:"log"` + DataSource DataSourceConfig `yaml:"data_source"` + Storage StorageConfig `yaml:"storage"` + RPC RPCConfig `yaml:"rpc"` + Sync SyncConfig `yaml:"sync"` + Subscription SubscriptionConfig `yaml:"subscription"` + Log LogConfig `yaml:"log"` } // DataSourceConfig configures the Celestia node connection. @@ -27,9 +28,10 @@ type StorageConfig struct { DBPath string `yaml:"db_path"` } -// RPCConfig configures the HTTP API server. +// RPCConfig configures the API servers. type RPCConfig struct { - ListenAddr string `yaml:"listen_addr"` + ListenAddr string `yaml:"listen_addr"` + GRPCListenAddr string `yaml:"grpc_listen_addr"` } // SyncConfig configures the sync coordinator. @@ -39,6 +41,11 @@ type SyncConfig struct { Concurrency int `yaml:"concurrency"` } +// SubscriptionConfig configures API event subscriptions. +type SubscriptionConfig struct { + BufferSize int `yaml:"buffer_size"` +} + // LogConfig configures logging. 
type LogConfig struct { Level string `yaml:"level"` @@ -55,12 +62,16 @@ func DefaultConfig() Config { DBPath: "apex.db", }, RPC: RPCConfig{ - ListenAddr: ":8080", + ListenAddr: ":8080", + GRPCListenAddr: ":9090", }, Sync: SyncConfig{ BatchSize: 64, Concurrency: 4, }, + Subscription: SubscriptionConfig{ + BufferSize: 64, + }, Log: LogConfig{ Level: "info", Format: "json", diff --git a/config/load.go b/config/load.go index e1d1809..efe8958 100644 --- a/config/load.go +++ b/config/load.go @@ -43,8 +43,10 @@ storage: db_path: "apex.db" rpc: - # Address for the HTTP API server + # Address for the JSON-RPC API server (HTTP/WebSocket) listen_addr: ":8080" + # Address for the gRPC API server + grpc_listen_addr: ":9090" sync: # Height to start syncing from (0 = genesis) @@ -54,6 +56,10 @@ sync: # Number of concurrent fetch workers concurrency: 4 +subscription: + # Event buffer size per subscriber (for API subscriptions) + buffer_size: 64 + log: # Log level: trace, debug, info, warn, error, fatal, panic level: "info" @@ -101,12 +107,21 @@ func validate(cfg *Config) error { if cfg.Storage.DBPath == "" { return fmt.Errorf("storage.db_path is required") } + if cfg.RPC.ListenAddr == "" { + return fmt.Errorf("rpc.listen_addr is required") + } + if cfg.RPC.GRPCListenAddr == "" { + return fmt.Errorf("rpc.grpc_listen_addr is required") + } if cfg.Sync.BatchSize <= 0 { return fmt.Errorf("sync.batch_size must be positive") } if cfg.Sync.Concurrency <= 0 { return fmt.Errorf("sync.concurrency must be positive") } + if cfg.Subscription.BufferSize <= 0 { + return fmt.Errorf("subscription.buffer_size must be positive") + } if !validLogLevels[cfg.Log.Level] { return fmt.Errorf("log.level %q is invalid; must be one of trace/debug/info/warn/error/fatal/panic", cfg.Log.Level) } diff --git a/go.mod b/go.mod index 49ac63e..7ddbecc 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,13 @@ module github.com/evstack/apex -go 1.24.0 +go 1.25.0 require ( github.com/filecoin-project/go-jsonrpc v0.10.1 
github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.10.2 + google.golang.org/grpc v1.79.1 + google.golang.org/protobuf v1.36.11 gopkg.in/yaml.v3 v3.0.1 modernc.org/sqlite v1.46.1 ) @@ -28,8 +30,11 @@ require ( go.uber.org/multierr v1.5.0 // indirect go.uber.org/zap v1.14.1 // indirect golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/net v0.48.0 // indirect golang.org/x/sys v0.41.0 // indirect + golang.org/x/text v0.32.0 // indirect golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect modernc.org/libc v1.67.6 // indirect modernc.org/mathutil v1.7.1 // indirect diff --git a/go.sum b/go.sum index ce87694..09dfec8 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -12,6 +14,10 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/filecoin-project/go-jsonrpc v0.10.1 h1:iEhgrjO0+rawwOZWRNgexLrWGLA+IEUyWiRRL134Ob8= github.com/filecoin-project/go-jsonrpc v0.10.1/go.mod h1:OG7kVBVh/AbDFHIwx7Kw0l9ARmKOS6gGOr0LbdBpbLc= +github.com/go-logr/logr 
v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= @@ -19,9 +25,11 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= @@ -78,6 +86,18 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H 
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= @@ -98,21 +118,23 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod 
v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -124,6 +146,8 @@ golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -131,17 +155,25 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d h1:t/LOSXPJ9R0B6fnZNyALBRfZBH0Uy0gT+uR+SJ6syqQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260217215200-42d3e9bedb6d/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY= +google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/justfile b/justfile index ff50d8f..cf06b49 100644 --- a/justfile +++ b/justfile @@ -36,5 +36,9 @@ 
tidy-check: go mod tidy git diff --exit-code go.mod go.sum +# Generate protobuf code +proto: + buf generate + # Run all checks (CI equivalent) check: tidy-check lint test build diff --git a/pkg/api/grpc/blob_service.go b/pkg/api/grpc/blob_service.go new file mode 100644 index 0000000..3a60bcd --- /dev/null +++ b/pkg/api/grpc/blob_service.go @@ -0,0 +1,139 @@ +package grpcapi + +import ( + "bytes" + "context" + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/evstack/apex/pkg/api" + pb "github.com/evstack/apex/pkg/api/grpc/gen/apex/v1" + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// BlobServiceServer implements the BlobService gRPC interface. +type BlobServiceServer struct { + pb.UnimplementedBlobServiceServer + svc *api.Service +} + +func (s *BlobServiceServer) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) { + ns, err := bytesToNamespace(req.Namespace) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid namespace: %v", err) + } + + blobs, err := s.svc.Store().GetBlobs(ctx, ns, req.Height, req.Height, 0, 0) + if err != nil { + return nil, status.Errorf(codes.Internal, "get blobs: %v", err) + } + + for i := range blobs { + if bytes.Equal(blobs[i].Commitment, req.Commitment) { + return &pb.GetResponse{Blob: blobToProto(&blobs[i])}, nil + } + } + + return nil, status.Error(codes.NotFound, store.ErrNotFound.Error()) +} + +func (s *BlobServiceServer) GetAll(ctx context.Context, req *pb.GetAllRequest) (*pb.GetAllResponse, error) { + const maxNamespaces = 16 + if len(req.Namespaces) > maxNamespaces { + return nil, status.Errorf(codes.InvalidArgument, "too many namespaces: %d (max %d)", len(req.Namespaces), maxNamespaces) + } + + nsList := make([]types.Namespace, len(req.Namespaces)) + for i, nsBytes := range req.Namespaces { + ns, err := bytesToNamespace(nsBytes) + if err != nil { + return nil, 
status.Errorf(codes.InvalidArgument, "invalid namespace %d: %v", i, err) + } + nsList[i] = ns + } + + var allBlobs []types.Blob + for _, ns := range nsList { + blobs, err := s.svc.Store().GetBlobs(ctx, ns, req.Height, req.Height, 0, 0) + if err != nil { + return nil, status.Errorf(codes.Internal, "get blobs: %v", err) + } + allBlobs = append(allBlobs, blobs...) + } + + // Apply pagination to the aggregate result. + if req.Offset > 0 { + if int(req.Offset) >= len(allBlobs) { + allBlobs = nil + } else { + allBlobs = allBlobs[req.Offset:] + } + } + if req.Limit > 0 && int(req.Limit) < len(allBlobs) { + allBlobs = allBlobs[:req.Limit] + } + + pbBlobs := make([]*pb.Blob, len(allBlobs)) + for i := range allBlobs { + pbBlobs[i] = blobToProto(&allBlobs[i]) + } + + return &pb.GetAllResponse{Blobs: pbBlobs}, nil +} + +func (s *BlobServiceServer) Subscribe(req *pb.BlobServiceSubscribeRequest, stream grpc.ServerStreamingServer[pb.BlobServiceSubscribeResponse]) error { + ns, err := bytesToNamespace(req.Namespace) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid namespace: %v", err) + } + + sub := s.svc.Notifier().Subscribe([]types.Namespace{ns}) + defer s.svc.Notifier().Unsubscribe(sub) + + ctx := stream.Context() + for { + select { + case <-ctx.Done(): + return nil + case ev, ok := <-sub.Events(): + if !ok { + return nil + } + pbBlobs := make([]*pb.Blob, len(ev.Blobs)) + for i := range ev.Blobs { + pbBlobs[i] = blobToProto(&ev.Blobs[i]) + } + if err := stream.Send(&pb.BlobServiceSubscribeResponse{ + Height: ev.Height, + Blobs: pbBlobs, + }); err != nil { + return err + } + } + } +} + +func blobToProto(b *types.Blob) *pb.Blob { + return &pb.Blob{ + Height: b.Height, + Namespace: b.Namespace[:], + Data: b.Data, + Commitment: b.Commitment, + ShareVersion: b.ShareVersion, + Signer: b.Signer, + Index: int32(b.Index), + } +} + +func bytesToNamespace(b []byte) (types.Namespace, error) { + if len(b) != types.NamespaceSize { + return types.Namespace{}, 
fmt.Errorf("expected %d bytes, got %d", types.NamespaceSize, len(b)) + } + var ns types.Namespace + copy(ns[:], b) + return ns, nil +} diff --git a/pkg/api/grpc/gen/apex/v1/blob.pb.go b/pkg/api/grpc/gen/apex/v1/blob.pb.go new file mode 100644 index 0000000..9ecd440 --- /dev/null +++ b/pkg/api/grpc/gen/apex/v1/blob.pb.go @@ -0,0 +1,433 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: apex/v1/blob.proto + +package gen + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Commitment []byte `protobuf:"bytes,3,opt,name=commitment,proto3" json:"commitment,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + mi := &file_apex_v1_blob_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetRequest) ProtoMessage() {} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
GetRequest.ProtoReflect.Descriptor instead. +func (*GetRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{0} +} + +func (x *GetRequest) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *GetRequest) GetNamespace() []byte { + if x != nil { + return x.Namespace + } + return nil +} + +func (x *GetRequest) GetCommitment() []byte { + if x != nil { + return x.Commitment + } + return nil +} + +type GetResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Blob *Blob `protobuf:"bytes,1,opt,name=blob,proto3" json:"blob,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + mi := &file_apex_v1_blob_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetResponse) ProtoMessage() {} + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. +func (*GetResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{1} +} + +func (x *GetResponse) GetBlob() *Blob { + if x != nil { + return x.Blob + } + return nil +} + +type GetAllRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Namespaces [][]byte `protobuf:"bytes,2,rep,name=namespaces,proto3" json:"namespaces,omitempty"` + // Pagination: maximum number of blobs to return. 0 means no limit. 
+ Limit int32 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` + // Pagination: number of blobs to skip. + Offset int32 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllRequest) Reset() { + *x = GetAllRequest{} + mi := &file_apex_v1_blob_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAllRequest) ProtoMessage() {} + +func (x *GetAllRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllRequest.ProtoReflect.Descriptor instead. +func (*GetAllRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{2} +} + +func (x *GetAllRequest) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *GetAllRequest) GetNamespaces() [][]byte { + if x != nil { + return x.Namespaces + } + return nil +} + +func (x *GetAllRequest) GetLimit() int32 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *GetAllRequest) GetOffset() int32 { + if x != nil { + return x.Offset + } + return 0 +} + +type GetAllResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Blobs []*Blob `protobuf:"bytes,1,rep,name=blobs,proto3" json:"blobs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetAllResponse) Reset() { + *x = GetAllResponse{} + mi := &file_apex_v1_blob_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetAllResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*GetAllResponse) ProtoMessage() {} + +func (x *GetAllResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAllResponse.ProtoReflect.Descriptor instead. +func (*GetAllResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{3} +} + +func (x *GetAllResponse) GetBlobs() []*Blob { + if x != nil { + return x.Blobs + } + return nil +} + +type BlobServiceSubscribeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace []byte `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BlobServiceSubscribeRequest) Reset() { + *x = BlobServiceSubscribeRequest{} + mi := &file_apex_v1_blob_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BlobServiceSubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlobServiceSubscribeRequest) ProtoMessage() {} + +func (x *BlobServiceSubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlobServiceSubscribeRequest.ProtoReflect.Descriptor instead. 
+func (*BlobServiceSubscribeRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{4} +} + +func (x *BlobServiceSubscribeRequest) GetNamespace() []byte { + if x != nil { + return x.Namespace + } + return nil +} + +type BlobServiceSubscribeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Blobs []*Blob `protobuf:"bytes,2,rep,name=blobs,proto3" json:"blobs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BlobServiceSubscribeResponse) Reset() { + *x = BlobServiceSubscribeResponse{} + mi := &file_apex_v1_blob_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BlobServiceSubscribeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlobServiceSubscribeResponse) ProtoMessage() {} + +func (x *BlobServiceSubscribeResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_blob_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlobServiceSubscribeResponse.ProtoReflect.Descriptor instead. 
+func (*BlobServiceSubscribeResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_blob_proto_rawDescGZIP(), []int{5} +} + +func (x *BlobServiceSubscribeResponse) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *BlobServiceSubscribeResponse) GetBlobs() []*Blob { + if x != nil { + return x.Blobs + } + return nil +} + +var File_apex_v1_blob_proto protoreflect.FileDescriptor + +const file_apex_v1_blob_proto_rawDesc = "" + + "\n" + + "\x12apex/v1/blob.proto\x12\aapex.v1\x1a\x13apex/v1/types.proto\"b\n" + + "\n" + + "GetRequest\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\fR\tnamespace\x12\x1e\n" + + "\n" + + "commitment\x18\x03 \x01(\fR\n" + + "commitment\"0\n" + + "\vGetResponse\x12!\n" + + "\x04blob\x18\x01 \x01(\v2\r.apex.v1.BlobR\x04blob\"u\n" + + "\rGetAllRequest\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12\x1e\n" + + "\n" + + "namespaces\x18\x02 \x03(\fR\n" + + "namespaces\x12\x14\n" + + "\x05limit\x18\x03 \x01(\x05R\x05limit\x12\x16\n" + + "\x06offset\x18\x04 \x01(\x05R\x06offset\"5\n" + + "\x0eGetAllResponse\x12#\n" + + "\x05blobs\x18\x01 \x03(\v2\r.apex.v1.BlobR\x05blobs\";\n" + + "\x1bBlobServiceSubscribeRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\fR\tnamespace\"[\n" + + "\x1cBlobServiceSubscribeResponse\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12#\n" + + "\x05blobs\x18\x02 \x03(\v2\r.apex.v1.BlobR\x05blobs2\xd6\x01\n" + + "\vBlobService\x120\n" + + "\x03Get\x12\x13.apex.v1.GetRequest\x1a\x14.apex.v1.GetResponse\x129\n" + + "\x06GetAll\x12\x16.apex.v1.GetAllRequest\x1a\x17.apex.v1.GetAllResponse\x12Z\n" + + "\tSubscribe\x12$.apex.v1.BlobServiceSubscribeRequest\x1a%.apex.v1.BlobServiceSubscribeResponse0\x01B.Z,github.com/evstack/apex/pkg/api/grpc/gen;genb\x06proto3" + +var ( + file_apex_v1_blob_proto_rawDescOnce sync.Once + file_apex_v1_blob_proto_rawDescData []byte +) + +func file_apex_v1_blob_proto_rawDescGZIP() []byte { + 
file_apex_v1_blob_proto_rawDescOnce.Do(func() { + file_apex_v1_blob_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_apex_v1_blob_proto_rawDesc), len(file_apex_v1_blob_proto_rawDesc))) + }) + return file_apex_v1_blob_proto_rawDescData +} + +var file_apex_v1_blob_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_apex_v1_blob_proto_goTypes = []any{ + (*GetRequest)(nil), // 0: apex.v1.GetRequest + (*GetResponse)(nil), // 1: apex.v1.GetResponse + (*GetAllRequest)(nil), // 2: apex.v1.GetAllRequest + (*GetAllResponse)(nil), // 3: apex.v1.GetAllResponse + (*BlobServiceSubscribeRequest)(nil), // 4: apex.v1.BlobServiceSubscribeRequest + (*BlobServiceSubscribeResponse)(nil), // 5: apex.v1.BlobServiceSubscribeResponse + (*Blob)(nil), // 6: apex.v1.Blob +} +var file_apex_v1_blob_proto_depIdxs = []int32{ + 6, // 0: apex.v1.GetResponse.blob:type_name -> apex.v1.Blob + 6, // 1: apex.v1.GetAllResponse.blobs:type_name -> apex.v1.Blob + 6, // 2: apex.v1.BlobServiceSubscribeResponse.blobs:type_name -> apex.v1.Blob + 0, // 3: apex.v1.BlobService.Get:input_type -> apex.v1.GetRequest + 2, // 4: apex.v1.BlobService.GetAll:input_type -> apex.v1.GetAllRequest + 4, // 5: apex.v1.BlobService.Subscribe:input_type -> apex.v1.BlobServiceSubscribeRequest + 1, // 6: apex.v1.BlobService.Get:output_type -> apex.v1.GetResponse + 3, // 7: apex.v1.BlobService.GetAll:output_type -> apex.v1.GetAllResponse + 5, // 8: apex.v1.BlobService.Subscribe:output_type -> apex.v1.BlobServiceSubscribeResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_apex_v1_blob_proto_init() } +func file_apex_v1_blob_proto_init() { + if File_apex_v1_blob_proto != nil { + return + } + file_apex_v1_types_proto_init() + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_apex_v1_blob_proto_rawDesc), len(file_apex_v1_blob_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apex_v1_blob_proto_goTypes, + DependencyIndexes: file_apex_v1_blob_proto_depIdxs, + MessageInfos: file_apex_v1_blob_proto_msgTypes, + }.Build() + File_apex_v1_blob_proto = out.File + file_apex_v1_blob_proto_goTypes = nil + file_apex_v1_blob_proto_depIdxs = nil +} diff --git a/pkg/api/grpc/gen/apex/v1/blob_grpc.pb.go b/pkg/api/grpc/gen/apex/v1/blob_grpc.pb.go new file mode 100644 index 0000000..8134f42 --- /dev/null +++ b/pkg/api/grpc/gen/apex/v1/blob_grpc.pb.go @@ -0,0 +1,211 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc (unknown) +// source: apex/v1/blob.proto + +package gen + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + BlobService_Get_FullMethodName = "/apex.v1.BlobService/Get" + BlobService_GetAll_FullMethodName = "/apex.v1.BlobService/GetAll" + BlobService_Subscribe_FullMethodName = "/apex.v1.BlobService/Subscribe" +) + +// BlobServiceClient is the client API for BlobService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// BlobService provides access to indexed blobs. +type BlobServiceClient interface { + // Get returns a single blob matching the namespace and commitment at the given height. 
+ Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + // GetAll returns all blobs for the given namespaces at the given height. + GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (*GetAllResponse, error) + // Subscribe streams blob events for the given namespace. + Subscribe(ctx context.Context, in *BlobServiceSubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[BlobServiceSubscribeResponse], error) +} + +type blobServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewBlobServiceClient(cc grpc.ClientConnInterface) BlobServiceClient { + return &blobServiceClient{cc} +} + +func (c *blobServiceClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetResponse) + err := c.cc.Invoke(ctx, BlobService_Get_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blobServiceClient) GetAll(ctx context.Context, in *GetAllRequest, opts ...grpc.CallOption) (*GetAllResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetAllResponse) + err := c.cc.Invoke(ctx, BlobService_GetAll_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *blobServiceClient) Subscribe(ctx context.Context, in *BlobServiceSubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[BlobServiceSubscribeResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &BlobService_ServiceDesc.Streams[0], BlobService_Subscribe_FullMethodName, cOpts...) 
+ if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[BlobServiceSubscribeRequest, BlobServiceSubscribeResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type BlobService_SubscribeClient = grpc.ServerStreamingClient[BlobServiceSubscribeResponse] + +// BlobServiceServer is the server API for BlobService service. +// All implementations must embed UnimplementedBlobServiceServer +// for forward compatibility. +// +// BlobService provides access to indexed blobs. +type BlobServiceServer interface { + // Get returns a single blob matching the namespace and commitment at the given height. + Get(context.Context, *GetRequest) (*GetResponse, error) + // GetAll returns all blobs for the given namespaces at the given height. + GetAll(context.Context, *GetAllRequest) (*GetAllResponse, error) + // Subscribe streams blob events for the given namespace. + Subscribe(*BlobServiceSubscribeRequest, grpc.ServerStreamingServer[BlobServiceSubscribeResponse]) error + mustEmbedUnimplementedBlobServiceServer() +} + +// UnimplementedBlobServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedBlobServiceServer struct{} + +func (UnimplementedBlobServiceServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedBlobServiceServer) GetAll(context.Context, *GetAllRequest) (*GetAllResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetAll not implemented") +} +func (UnimplementedBlobServiceServer) Subscribe(*BlobServiceSubscribeRequest, grpc.ServerStreamingServer[BlobServiceSubscribeResponse]) error { + return status.Error(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedBlobServiceServer) mustEmbedUnimplementedBlobServiceServer() {} +func (UnimplementedBlobServiceServer) testEmbeddedByValue() {} + +// UnsafeBlobServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BlobServiceServer will +// result in compilation errors. +type UnsafeBlobServiceServer interface { + mustEmbedUnimplementedBlobServiceServer() +} + +func RegisterBlobServiceServer(s grpc.ServiceRegistrar, srv BlobServiceServer) { + // If the following call panics, it indicates UnimplementedBlobServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&BlobService_ServiceDesc, srv) +} + +func _BlobService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlobServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BlobService_Get_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlobServiceServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlobService_GetAll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAllRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BlobServiceServer).GetAll(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BlobService_GetAll_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BlobServiceServer).GetAll(ctx, req.(*GetAllRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BlobService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(BlobServiceSubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(BlobServiceServer).Subscribe(m, &grpc.GenericServerStream[BlobServiceSubscribeRequest, BlobServiceSubscribeResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type BlobService_SubscribeServer = grpc.ServerStreamingServer[BlobServiceSubscribeResponse] + +// BlobService_ServiceDesc is the grpc.ServiceDesc for BlobService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var BlobService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "apex.v1.BlobService", + HandlerType: (*BlobServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _BlobService_Get_Handler, + }, + { + MethodName: "GetAll", + Handler: _BlobService_GetAll_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _BlobService_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "apex/v1/blob.proto", +} diff --git a/pkg/api/grpc/gen/apex/v1/header.pb.go b/pkg/api/grpc/gen/apex/v1/header.pb.go new file mode 100644 index 0000000..8ee67f8 --- /dev/null +++ b/pkg/api/grpc/gen/apex/v1/header.pb.go @@ -0,0 +1,443 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: apex/v1/header.proto + +package gen + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetByHeightRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetByHeightRequest) Reset() { + *x = GetByHeightRequest{} + mi := &file_apex_v1_header_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetByHeightRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetByHeightRequest) ProtoMessage() {} + +func (x *GetByHeightRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetByHeightRequest.ProtoReflect.Descriptor instead. 
+func (*GetByHeightRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{0} +} + +func (x *GetByHeightRequest) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +type GetByHeightResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetByHeightResponse) Reset() { + *x = GetByHeightResponse{} + mi := &file_apex_v1_header_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetByHeightResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetByHeightResponse) ProtoMessage() {} + +func (x *GetByHeightResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetByHeightResponse.ProtoReflect.Descriptor instead. 
+func (*GetByHeightResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{1} +} + +func (x *GetByHeightResponse) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +type LocalHeadRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalHeadRequest) Reset() { + *x = LocalHeadRequest{} + mi := &file_apex_v1_header_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalHeadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalHeadRequest) ProtoMessage() {} + +func (x *LocalHeadRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalHeadRequest.ProtoReflect.Descriptor instead. 
+func (*LocalHeadRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{2} +} + +type LocalHeadResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LocalHeadResponse) Reset() { + *x = LocalHeadResponse{} + mi := &file_apex_v1_header_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LocalHeadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalHeadResponse) ProtoMessage() {} + +func (x *LocalHeadResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalHeadResponse.ProtoReflect.Descriptor instead. 
+func (*LocalHeadResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{3} +} + +func (x *LocalHeadResponse) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +type NetworkHeadRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NetworkHeadRequest) Reset() { + *x = NetworkHeadRequest{} + mi := &file_apex_v1_header_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NetworkHeadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkHeadRequest) ProtoMessage() {} + +func (x *NetworkHeadRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkHeadRequest.ProtoReflect.Descriptor instead. 
+func (*NetworkHeadRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{4} +} + +type NetworkHeadResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NetworkHeadResponse) Reset() { + *x = NetworkHeadResponse{} + mi := &file_apex_v1_header_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NetworkHeadResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NetworkHeadResponse) ProtoMessage() {} + +func (x *NetworkHeadResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NetworkHeadResponse.ProtoReflect.Descriptor instead. 
+func (*NetworkHeadResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{5} +} + +func (x *NetworkHeadResponse) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +type HeaderServiceSubscribeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderServiceSubscribeRequest) Reset() { + *x = HeaderServiceSubscribeRequest{} + mi := &file_apex_v1_header_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderServiceSubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderServiceSubscribeRequest) ProtoMessage() {} + +func (x *HeaderServiceSubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderServiceSubscribeRequest.ProtoReflect.Descriptor instead. 
+func (*HeaderServiceSubscribeRequest) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{6} +} + +type HeaderServiceSubscribeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Header *Header `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeaderServiceSubscribeResponse) Reset() { + *x = HeaderServiceSubscribeResponse{} + mi := &file_apex_v1_header_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeaderServiceSubscribeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderServiceSubscribeResponse) ProtoMessage() {} + +func (x *HeaderServiceSubscribeResponse) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_header_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderServiceSubscribeResponse.ProtoReflect.Descriptor instead. 
+func (*HeaderServiceSubscribeResponse) Descriptor() ([]byte, []int) { + return file_apex_v1_header_proto_rawDescGZIP(), []int{7} +} + +func (x *HeaderServiceSubscribeResponse) GetHeader() *Header { + if x != nil { + return x.Header + } + return nil +} + +var File_apex_v1_header_proto protoreflect.FileDescriptor + +const file_apex_v1_header_proto_rawDesc = "" + + "\n" + + "\x14apex/v1/header.proto\x12\aapex.v1\x1a\x13apex/v1/types.proto\",\n" + + "\x12GetByHeightRequest\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\">\n" + + "\x13GetByHeightResponse\x12'\n" + + "\x06header\x18\x01 \x01(\v2\x0f.apex.v1.HeaderR\x06header\"\x12\n" + + "\x10LocalHeadRequest\"<\n" + + "\x11LocalHeadResponse\x12'\n" + + "\x06header\x18\x01 \x01(\v2\x0f.apex.v1.HeaderR\x06header\"\x14\n" + + "\x12NetworkHeadRequest\">\n" + + "\x13NetworkHeadResponse\x12'\n" + + "\x06header\x18\x01 \x01(\v2\x0f.apex.v1.HeaderR\x06header\"\x1f\n" + + "\x1dHeaderServiceSubscribeRequest\"I\n" + + "\x1eHeaderServiceSubscribeResponse\x12'\n" + + "\x06header\x18\x01 \x01(\v2\x0f.apex.v1.HeaderR\x06header2\xc7\x02\n" + + "\rHeaderService\x12H\n" + + "\vGetByHeight\x12\x1b.apex.v1.GetByHeightRequest\x1a\x1c.apex.v1.GetByHeightResponse\x12B\n" + + "\tLocalHead\x12\x19.apex.v1.LocalHeadRequest\x1a\x1a.apex.v1.LocalHeadResponse\x12H\n" + + "\vNetworkHead\x12\x1b.apex.v1.NetworkHeadRequest\x1a\x1c.apex.v1.NetworkHeadResponse\x12^\n" + + "\tSubscribe\x12&.apex.v1.HeaderServiceSubscribeRequest\x1a'.apex.v1.HeaderServiceSubscribeResponse0\x01B.Z,github.com/evstack/apex/pkg/api/grpc/gen;genb\x06proto3" + +var ( + file_apex_v1_header_proto_rawDescOnce sync.Once + file_apex_v1_header_proto_rawDescData []byte +) + +func file_apex_v1_header_proto_rawDescGZIP() []byte { + file_apex_v1_header_proto_rawDescOnce.Do(func() { + file_apex_v1_header_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_apex_v1_header_proto_rawDesc), len(file_apex_v1_header_proto_rawDesc))) + }) + return 
file_apex_v1_header_proto_rawDescData +} + +var file_apex_v1_header_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_apex_v1_header_proto_goTypes = []any{ + (*GetByHeightRequest)(nil), // 0: apex.v1.GetByHeightRequest + (*GetByHeightResponse)(nil), // 1: apex.v1.GetByHeightResponse + (*LocalHeadRequest)(nil), // 2: apex.v1.LocalHeadRequest + (*LocalHeadResponse)(nil), // 3: apex.v1.LocalHeadResponse + (*NetworkHeadRequest)(nil), // 4: apex.v1.NetworkHeadRequest + (*NetworkHeadResponse)(nil), // 5: apex.v1.NetworkHeadResponse + (*HeaderServiceSubscribeRequest)(nil), // 6: apex.v1.HeaderServiceSubscribeRequest + (*HeaderServiceSubscribeResponse)(nil), // 7: apex.v1.HeaderServiceSubscribeResponse + (*Header)(nil), // 8: apex.v1.Header +} +var file_apex_v1_header_proto_depIdxs = []int32{ + 8, // 0: apex.v1.GetByHeightResponse.header:type_name -> apex.v1.Header + 8, // 1: apex.v1.LocalHeadResponse.header:type_name -> apex.v1.Header + 8, // 2: apex.v1.NetworkHeadResponse.header:type_name -> apex.v1.Header + 8, // 3: apex.v1.HeaderServiceSubscribeResponse.header:type_name -> apex.v1.Header + 0, // 4: apex.v1.HeaderService.GetByHeight:input_type -> apex.v1.GetByHeightRequest + 2, // 5: apex.v1.HeaderService.LocalHead:input_type -> apex.v1.LocalHeadRequest + 4, // 6: apex.v1.HeaderService.NetworkHead:input_type -> apex.v1.NetworkHeadRequest + 6, // 7: apex.v1.HeaderService.Subscribe:input_type -> apex.v1.HeaderServiceSubscribeRequest + 1, // 8: apex.v1.HeaderService.GetByHeight:output_type -> apex.v1.GetByHeightResponse + 3, // 9: apex.v1.HeaderService.LocalHead:output_type -> apex.v1.LocalHeadResponse + 5, // 10: apex.v1.HeaderService.NetworkHead:output_type -> apex.v1.NetworkHeadResponse + 7, // 11: apex.v1.HeaderService.Subscribe:output_type -> apex.v1.HeaderServiceSubscribeResponse + 8, // [8:12] is the sub-list for method output_type + 4, // [4:8] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the 
sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_apex_v1_header_proto_init() } +func file_apex_v1_header_proto_init() { + if File_apex_v1_header_proto != nil { + return + } + file_apex_v1_types_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_apex_v1_header_proto_rawDesc), len(file_apex_v1_header_proto_rawDesc)), + NumEnums: 0, + NumMessages: 8, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_apex_v1_header_proto_goTypes, + DependencyIndexes: file_apex_v1_header_proto_depIdxs, + MessageInfos: file_apex_v1_header_proto_msgTypes, + }.Build() + File_apex_v1_header_proto = out.File + file_apex_v1_header_proto_goTypes = nil + file_apex_v1_header_proto_depIdxs = nil +} diff --git a/pkg/api/grpc/gen/apex/v1/header_grpc.pb.go b/pkg/api/grpc/gen/apex/v1/header_grpc.pb.go new file mode 100644 index 0000000..2798c6d --- /dev/null +++ b/pkg/api/grpc/gen/apex/v1/header_grpc.pb.go @@ -0,0 +1,251 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.1 +// - protoc (unknown) +// source: apex/v1/header.proto + +package gen + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + HeaderService_GetByHeight_FullMethodName = "/apex.v1.HeaderService/GetByHeight" + HeaderService_LocalHead_FullMethodName = "/apex.v1.HeaderService/LocalHead" + HeaderService_NetworkHead_FullMethodName = "/apex.v1.HeaderService/NetworkHead" + HeaderService_Subscribe_FullMethodName = "/apex.v1.HeaderService/Subscribe" +) + +// HeaderServiceClient is the client API for HeaderService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// HeaderService provides access to indexed headers. +type HeaderServiceClient interface { + // GetByHeight returns the header at the given height. + GetByHeight(ctx context.Context, in *GetByHeightRequest, opts ...grpc.CallOption) (*GetByHeightResponse, error) + // LocalHead returns the header at the latest synced height. + LocalHead(ctx context.Context, in *LocalHeadRequest, opts ...grpc.CallOption) (*LocalHeadResponse, error) + // NetworkHead returns the current network head from the upstream node. + NetworkHead(ctx context.Context, in *NetworkHeadRequest, opts ...grpc.CallOption) (*NetworkHeadResponse, error) + // Subscribe streams new headers as they are indexed. + Subscribe(ctx context.Context, in *HeaderServiceSubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HeaderServiceSubscribeResponse], error) +} + +type headerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewHeaderServiceClient(cc grpc.ClientConnInterface) HeaderServiceClient { + return &headerServiceClient{cc} +} + +func (c *headerServiceClient) GetByHeight(ctx context.Context, in *GetByHeightRequest, opts ...grpc.CallOption) (*GetByHeightResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetByHeightResponse) + err := c.cc.Invoke(ctx, HeaderService_GetByHeight_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *headerServiceClient) LocalHead(ctx context.Context, in *LocalHeadRequest, opts ...grpc.CallOption) (*LocalHeadResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(LocalHeadResponse) + err := c.cc.Invoke(ctx, HeaderService_LocalHead_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *headerServiceClient) NetworkHead(ctx context.Context, in *NetworkHeadRequest, opts ...grpc.CallOption) (*NetworkHeadResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(NetworkHeadResponse) + err := c.cc.Invoke(ctx, HeaderService_NetworkHead_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *headerServiceClient) Subscribe(ctx context.Context, in *HeaderServiceSubscribeRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HeaderServiceSubscribeResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &HeaderService_ServiceDesc.Streams[0], HeaderService_Subscribe_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[HeaderServiceSubscribeRequest, HeaderServiceSubscribeResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HeaderService_SubscribeClient = grpc.ServerStreamingClient[HeaderServiceSubscribeResponse] + +// HeaderServiceServer is the server API for HeaderService service. +// All implementations must embed UnimplementedHeaderServiceServer +// for forward compatibility. 
+// +// HeaderService provides access to indexed headers. +type HeaderServiceServer interface { + // GetByHeight returns the header at the given height. + GetByHeight(context.Context, *GetByHeightRequest) (*GetByHeightResponse, error) + // LocalHead returns the header at the latest synced height. + LocalHead(context.Context, *LocalHeadRequest) (*LocalHeadResponse, error) + // NetworkHead returns the current network head from the upstream node. + NetworkHead(context.Context, *NetworkHeadRequest) (*NetworkHeadResponse, error) + // Subscribe streams new headers as they are indexed. + Subscribe(*HeaderServiceSubscribeRequest, grpc.ServerStreamingServer[HeaderServiceSubscribeResponse]) error + mustEmbedUnimplementedHeaderServiceServer() +} + +// UnimplementedHeaderServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHeaderServiceServer struct{} + +func (UnimplementedHeaderServiceServer) GetByHeight(context.Context, *GetByHeightRequest) (*GetByHeightResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetByHeight not implemented") +} +func (UnimplementedHeaderServiceServer) LocalHead(context.Context, *LocalHeadRequest) (*LocalHeadResponse, error) { + return nil, status.Error(codes.Unimplemented, "method LocalHead not implemented") +} +func (UnimplementedHeaderServiceServer) NetworkHead(context.Context, *NetworkHeadRequest) (*NetworkHeadResponse, error) { + return nil, status.Error(codes.Unimplemented, "method NetworkHead not implemented") +} +func (UnimplementedHeaderServiceServer) Subscribe(*HeaderServiceSubscribeRequest, grpc.ServerStreamingServer[HeaderServiceSubscribeResponse]) error { + return status.Error(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedHeaderServiceServer) mustEmbedUnimplementedHeaderServiceServer() {} +func 
(UnimplementedHeaderServiceServer) testEmbeddedByValue() {} + +// UnsafeHeaderServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HeaderServiceServer will +// result in compilation errors. +type UnsafeHeaderServiceServer interface { + mustEmbedUnimplementedHeaderServiceServer() +} + +func RegisterHeaderServiceServer(s grpc.ServiceRegistrar, srv HeaderServiceServer) { + // If the following call panics, it indicates UnimplementedHeaderServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&HeaderService_ServiceDesc, srv) +} + +func _HeaderService_GetByHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetByHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeaderServiceServer).GetByHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeaderService_GetByHeight_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeaderServiceServer).GetByHeight(ctx, req.(*GetByHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HeaderService_LocalHead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LocalHeadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeaderServiceServer).LocalHead(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
HeaderService_LocalHead_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeaderServiceServer).LocalHead(ctx, req.(*LocalHeadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HeaderService_NetworkHead_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NetworkHeadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HeaderServiceServer).NetworkHead(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HeaderService_NetworkHead_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HeaderServiceServer).NetworkHead(ctx, req.(*NetworkHeadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HeaderService_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HeaderServiceSubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HeaderServiceServer).Subscribe(m, &grpc.GenericServerStream[HeaderServiceSubscribeRequest, HeaderServiceSubscribeResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type HeaderService_SubscribeServer = grpc.ServerStreamingServer[HeaderServiceSubscribeResponse] + +// HeaderService_ServiceDesc is the grpc.ServiceDesc for HeaderService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var HeaderService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "apex.v1.HeaderService", + HandlerType: (*HeaderServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetByHeight", + Handler: _HeaderService_GetByHeight_Handler, + }, + { + MethodName: "LocalHead", + Handler: _HeaderService_LocalHead_Handler, + }, + { + MethodName: "NetworkHead", + Handler: _HeaderService_NetworkHead_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _HeaderService_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "apex/v1/header.proto", +} diff --git a/pkg/api/grpc/gen/apex/v1/types.pb.go b/pkg/api/grpc/gen/apex/v1/types.pb.go new file mode 100644 index 0000000..0e2896b --- /dev/null +++ b/pkg/api/grpc/gen/apex/v1/types.pb.go @@ -0,0 +1,267 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: apex/v1/types.proto + +package gen + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Blob represents a blob submitted to a Celestia namespace. 
+type Blob struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Namespace []byte `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Commitment []byte `protobuf:"bytes,4,opt,name=commitment,proto3" json:"commitment,omitempty"` + ShareVersion uint32 `protobuf:"varint,5,opt,name=share_version,json=shareVersion,proto3" json:"share_version,omitempty"` + Signer []byte `protobuf:"bytes,6,opt,name=signer,proto3" json:"signer,omitempty"` + Index int32 `protobuf:"varint,7,opt,name=index,proto3" json:"index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Blob) Reset() { + *x = Blob{} + mi := &file_apex_v1_types_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Blob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Blob) ProtoMessage() {} + +func (x *Blob) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_types_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Blob.ProtoReflect.Descriptor instead. 
+func (*Blob) Descriptor() ([]byte, []int) { + return file_apex_v1_types_proto_rawDescGZIP(), []int{0} +} + +func (x *Blob) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *Blob) GetNamespace() []byte { + if x != nil { + return x.Namespace + } + return nil +} + +func (x *Blob) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *Blob) GetCommitment() []byte { + if x != nil { + return x.Commitment + } + return nil +} + +func (x *Blob) GetShareVersion() uint32 { + if x != nil { + return x.ShareVersion + } + return 0 +} + +func (x *Blob) GetSigner() []byte { + if x != nil { + return x.Signer + } + return nil +} + +func (x *Blob) GetIndex() int32 { + if x != nil { + return x.Index + } + return 0 +} + +// Header represents a Celestia block header. +type Header struct { + state protoimpl.MessageState `protogen:"open.v1"` + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + DataHash []byte `protobuf:"bytes,3,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"` + RawHeader []byte `protobuf:"bytes,5,opt,name=raw_header,json=rawHeader,proto3" json:"raw_header,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Header) Reset() { + *x = Header{} + mi := &file_apex_v1_types_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_apex_v1_types_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. +func (*Header) Descriptor() ([]byte, []int) { + return file_apex_v1_types_proto_rawDescGZIP(), []int{1} +} + +func (x *Header) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *Header) GetHash() []byte { + if x != nil { + return x.Hash + } + return nil +} + +func (x *Header) GetDataHash() []byte { + if x != nil { + return x.DataHash + } + return nil +} + +func (x *Header) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *Header) GetRawHeader() []byte { + if x != nil { + return x.RawHeader + } + return nil +} + +var File_apex_v1_types_proto protoreflect.FileDescriptor + +const file_apex_v1_types_proto_rawDesc = "" + + "\n" + + "\x13apex/v1/types.proto\x12\aapex.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xc3\x01\n" + + "\x04Blob\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\fR\tnamespace\x12\x12\n" + + "\x04data\x18\x03 \x01(\fR\x04data\x12\x1e\n" + + "\n" + + "commitment\x18\x04 \x01(\fR\n" + + "commitment\x12#\n" + + "\rshare_version\x18\x05 \x01(\rR\fshareVersion\x12\x16\n" + + "\x06signer\x18\x06 \x01(\fR\x06signer\x12\x14\n" + + "\x05index\x18\a \x01(\x05R\x05index\"\xa0\x01\n" + + "\x06Header\x12\x16\n" + + "\x06height\x18\x01 \x01(\x04R\x06height\x12\x12\n" + + "\x04hash\x18\x02 \x01(\fR\x04hash\x12\x1b\n" + + "\tdata_hash\x18\x03 \x01(\fR\bdataHash\x12.\n" + + "\x04time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\x12\x1d\n" + + "\n" + + "raw_header\x18\x05 \x01(\fR\trawHeaderB.Z,github.com/evstack/apex/pkg/api/grpc/gen;genb\x06proto3" + +var ( + file_apex_v1_types_proto_rawDescOnce sync.Once + file_apex_v1_types_proto_rawDescData []byte +) + +func file_apex_v1_types_proto_rawDescGZIP() []byte { + file_apex_v1_types_proto_rawDescOnce.Do(func() { + file_apex_v1_types_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_apex_v1_types_proto_rawDesc), len(file_apex_v1_types_proto_rawDesc))) + }) + return file_apex_v1_types_proto_rawDescData +} + +var file_apex_v1_types_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_apex_v1_types_proto_goTypes = []any{ + (*Blob)(nil), // 0: apex.v1.Blob + (*Header)(nil), // 1: apex.v1.Header + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp +} +var file_apex_v1_types_proto_depIdxs = []int32{ + 2, // 0: apex.v1.Header.time:type_name -> google.protobuf.Timestamp + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_apex_v1_types_proto_init() } +func file_apex_v1_types_proto_init() { + if File_apex_v1_types_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_apex_v1_types_proto_rawDesc), len(file_apex_v1_types_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_apex_v1_types_proto_goTypes, + DependencyIndexes: file_apex_v1_types_proto_depIdxs, + MessageInfos: file_apex_v1_types_proto_msgTypes, + }.Build() + File_apex_v1_types_proto = out.File + file_apex_v1_types_proto_goTypes = nil + file_apex_v1_types_proto_depIdxs = nil +} diff --git a/pkg/api/grpc/header_service.go b/pkg/api/grpc/header_service.go new file mode 100644 index 0000000..f741aad --- /dev/null +++ b/pkg/api/grpc/header_service.go @@ -0,0 +1,91 @@ +package grpcapi + +import ( + "context" + "errors" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + 
"github.com/evstack/apex/pkg/api" + pb "github.com/evstack/apex/pkg/api/grpc/gen/apex/v1" + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// HeaderServiceServer implements the HeaderService gRPC interface. +type HeaderServiceServer struct { + pb.UnimplementedHeaderServiceServer + svc *api.Service +} + +func (s *HeaderServiceServer) GetByHeight(ctx context.Context, req *pb.GetByHeightRequest) (*pb.GetByHeightResponse, error) { + hdr, err := s.svc.Store().GetHeader(ctx, req.Height) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "header at height %d not found", req.Height) + } + return nil, status.Errorf(codes.Internal, "get header: %v", err) + } + return &pb.GetByHeightResponse{Header: headerToProto(hdr)}, nil +} + +func (s *HeaderServiceServer) LocalHead(ctx context.Context, _ *pb.LocalHeadRequest) (*pb.LocalHeadResponse, error) { + ss, err := s.svc.Store().GetSyncState(ctx) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "no sync state available") + } + return nil, status.Errorf(codes.Internal, "get sync state: %v", err) + } + hdr, err := s.svc.Store().GetHeader(ctx, ss.LatestHeight) + if err != nil { + if errors.Is(err, store.ErrNotFound) { + return nil, status.Errorf(codes.NotFound, "header at height %d not found", ss.LatestHeight) + } + return nil, status.Errorf(codes.Internal, "get header: %v", err) + } + return &pb.LocalHeadResponse{Header: headerToProto(hdr)}, nil +} + +func (s *HeaderServiceServer) NetworkHead(ctx context.Context, _ *pb.NetworkHeadRequest) (*pb.NetworkHeadResponse, error) { + hdr, err := s.svc.Fetcher().GetNetworkHead(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "get network head: %v", err) + } + return &pb.NetworkHeadResponse{Header: headerToProto(hdr)}, nil +} + +func (s *HeaderServiceServer) Subscribe(_ *pb.HeaderServiceSubscribeRequest, stream 
grpc.ServerStreamingServer[pb.HeaderServiceSubscribeResponse]) error { + sub := s.svc.HeaderSubscribe() + defer s.svc.Notifier().Unsubscribe(sub) + + ctx := stream.Context() + for { + select { + case <-ctx.Done(): + return nil + case ev, ok := <-sub.Events(): + if !ok { + return nil + } + if ev.Header != nil { + if err := stream.Send(&pb.HeaderServiceSubscribeResponse{Header: headerToProto(ev.Header)}); err != nil { + return err + } + } + } + } +} + +func headerToProto(h *types.Header) *pb.Header { + return &pb.Header{ + Height: h.Height, + Hash: h.Hash, + DataHash: h.DataHash, + Time: timestamppb.New(h.Time), + RawHeader: h.RawHeader, + } +} diff --git a/pkg/api/grpc/server.go b/pkg/api/grpc/server.go new file mode 100644 index 0000000..42b8a78 --- /dev/null +++ b/pkg/api/grpc/server.go @@ -0,0 +1,20 @@ +package grpcapi + +import ( + "github.com/rs/zerolog" + "google.golang.org/grpc" + + "github.com/evstack/apex/pkg/api" + pb "github.com/evstack/apex/pkg/api/grpc/gen/apex/v1" +) + +// NewServer creates a gRPC server with blob and header services registered. +func NewServer(svc *api.Service, log zerolog.Logger) *grpc.Server { + srv := grpc.NewServer() + + pb.RegisterBlobServiceServer(srv, &BlobServiceServer{svc: svc}) + pb.RegisterHeaderServiceServer(srv, &HeaderServiceServer{svc: svc}) + + log.Info().Msg("gRPC server initialized") + return srv +} diff --git a/pkg/api/grpc/server_test.go b/pkg/api/grpc/server_test.go new file mode 100644 index 0000000..a370528 --- /dev/null +++ b/pkg/api/grpc/server_test.go @@ -0,0 +1,338 @@ +package grpcapi + +import ( + "context" + "errors" + "net" + "testing" + "time" + + "github.com/rs/zerolog" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + "github.com/evstack/apex/pkg/api" + pb "github.com/evstack/apex/pkg/api/grpc/gen/apex/v1" + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// mockStore for gRPC tests. 
+type mockStore struct { + headers map[uint64]*types.Header + blobs map[uint64][]types.Blob + syncState *types.SyncStatus +} + +func newMockStore() *mockStore { + return &mockStore{ + headers: make(map[uint64]*types.Header), + blobs: make(map[uint64][]types.Blob), + } +} + +func (m *mockStore) PutBlobs(_ context.Context, blobs []types.Blob) error { + for _, b := range blobs { + m.blobs[b.Height] = append(m.blobs[b.Height], b) + } + return nil +} + +func (m *mockStore) GetBlob(_ context.Context, ns types.Namespace, height uint64, index int) (*types.Blob, error) { + for _, b := range m.blobs[height] { + if b.Namespace == ns && b.Index == index { + return &b, nil + } + } + return nil, store.ErrNotFound +} + +func (m *mockStore) GetBlobs(_ context.Context, ns types.Namespace, startHeight, endHeight uint64, _, _ int) ([]types.Blob, error) { + var result []types.Blob + for h := startHeight; h <= endHeight; h++ { + for _, b := range m.blobs[h] { + if b.Namespace == ns { + result = append(result, b) + } + } + } + return result, nil +} + +func (m *mockStore) PutHeader(_ context.Context, h *types.Header) error { + m.headers[h.Height] = h + return nil +} + +func (m *mockStore) GetHeader(_ context.Context, height uint64) (*types.Header, error) { + h, ok := m.headers[height] + if !ok { + return nil, store.ErrNotFound + } + return h, nil +} + +func (m *mockStore) PutNamespace(_ context.Context, _ types.Namespace) error { return nil } +func (m *mockStore) GetNamespaces(_ context.Context) ([]types.Namespace, error) { return nil, nil } + +func (m *mockStore) GetSyncState(_ context.Context) (*types.SyncStatus, error) { + if m.syncState == nil { + return nil, store.ErrNotFound + } + return m.syncState, nil +} + +func (m *mockStore) SetSyncState(_ context.Context, s types.SyncStatus) error { + m.syncState = &s + return nil +} + +func (m *mockStore) Close() error { return nil } + +type mockFetcher struct { + networkHead *types.Header +} + +func (f *mockFetcher) GetHeader(_ 
context.Context, _ uint64) (*types.Header, error) { + return nil, errors.New("not implemented") +} + +func (f *mockFetcher) GetBlobs(_ context.Context, _ uint64, _ []types.Namespace) ([]types.Blob, error) { + return nil, nil +} + +func (f *mockFetcher) GetNetworkHead(_ context.Context) (*types.Header, error) { + if f.networkHead == nil { + return nil, errors.New("no network head") + } + return f.networkHead, nil +} + +func (f *mockFetcher) SubscribeHeaders(_ context.Context) (<-chan *types.Header, error) { + return make(chan *types.Header), nil +} + +func (f *mockFetcher) Close() error { return nil } + +func testNamespace(b byte) types.Namespace { + var ns types.Namespace + ns[types.NamespaceSize-1] = b + return ns +} + +func startTestServer(t *testing.T, svc *api.Service) pb.BlobServiceClient { + t.Helper() + + srv := NewServer(svc, zerolog.Nop()) + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("listen: %v", err) + } + + go func() { _ = srv.Serve(lis) }() + t.Cleanup(func() { srv.GracefulStop() }) + + conn, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("dial: %v", err) + } + t.Cleanup(func() { _ = conn.Close() }) + + return pb.NewBlobServiceClient(conn) +} + +func startTestHeaderServer(t *testing.T, svc *api.Service) pb.HeaderServiceClient { + t.Helper() + + srv := NewServer(svc, zerolog.Nop()) + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("listen: %v", err) + } + + go func() { _ = srv.Serve(lis) }() + t.Cleanup(func() { srv.GracefulStop() }) + + conn, err := grpc.NewClient(lis.Addr().String(), grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatalf("dial: %v", err) + } + t.Cleanup(func() { _ = conn.Close() }) + + return pb.NewHeaderServiceClient(conn) +} + +func TestGRPCBlobGet(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + commitment := []byte("c1") + + st.blobs[10] = 
[]types.Blob{ + {Height: 10, Namespace: ns, Data: []byte("d1"), Commitment: commitment, Index: 0}, + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + client := startTestServer(t, svc) + + resp, err := client.Get(context.Background(), &pb.GetRequest{ + Height: 10, + Namespace: ns[:], + Commitment: commitment, + }) + if err != nil { + t.Fatalf("Get: %v", err) + } + if resp.Blob.Height != 10 { + t.Errorf("Height = %d, want 10", resp.Blob.Height) + } + if string(resp.Blob.Data) != "d1" { + t.Errorf("Data = %q, want %q", resp.Blob.Data, "d1") + } +} + +func TestGRPCBlobGetAll(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + + st.blobs[10] = []types.Blob{ + {Height: 10, Namespace: ns, Data: []byte("d1"), Commitment: []byte("c1"), Index: 0}, + {Height: 10, Namespace: ns, Data: []byte("d2"), Commitment: []byte("c2"), Index: 1}, + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + client := startTestServer(t, svc) + + resp, err := client.GetAll(context.Background(), &pb.GetAllRequest{ + Height: 10, + Namespaces: [][]byte{ns[:]}, + }) + if err != nil { + t.Fatalf("GetAll: %v", err) + } + if len(resp.Blobs) != 2 { + t.Errorf("got %d blobs, want 2", len(resp.Blobs)) + } +} + +func TestGRPCHeaderGetByHeight(t *testing.T) { + st := newMockStore() + now := time.Now().UTC().Truncate(time.Second) + st.headers[42] = &types.Header{ + Height: 42, + Hash: []byte("hash"), + DataHash: []byte("dh"), + Time: now, + RawHeader: []byte("raw"), + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + client := startTestHeaderServer(t, svc) + + resp, err := client.GetByHeight(context.Background(), &pb.GetByHeightRequest{Height: 42}) + if err != nil { + t.Fatalf("GetByHeight: %v", err) + } + if resp.Header.Height != 42 { + t.Errorf("Height = %d, want 
42", resp.Header.Height) + } + if string(resp.Header.Hash) != "hash" { + t.Errorf("Hash = %q, want %q", resp.Header.Hash, "hash") + } +} + +func TestGRPCHeaderLocalHead(t *testing.T) { + st := newMockStore() + st.syncState = &types.SyncStatus{LatestHeight: 100} + st.headers[100] = &types.Header{ + Height: 100, + Hash: []byte("hash100"), + RawHeader: []byte("raw"), + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + client := startTestHeaderServer(t, svc) + + resp, err := client.LocalHead(context.Background(), &pb.LocalHeadRequest{}) + if err != nil { + t.Fatalf("LocalHead: %v", err) + } + if resp.Header.Height != 100 { + t.Errorf("Height = %d, want 100", resp.Header.Height) + } +} + +func TestGRPCHeaderNetworkHead(t *testing.T) { + ft := &mockFetcher{ + networkHead: &types.Header{ + Height: 200, + Hash: []byte("hash200"), + RawHeader: []byte("raw"), + }, + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(newMockStore(), ft, nil, notifier, zerolog.Nop()) + client := startTestHeaderServer(t, svc) + + resp, err := client.NetworkHead(context.Background(), &pb.NetworkHeadRequest{}) + if err != nil { + t.Fatalf("NetworkHead: %v", err) + } + if resp.Header.Height != 200 { + t.Errorf("Height = %d, want 200", resp.Header.Height) + } +} + +func TestGRPCBlobSubscribe(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + client := startTestServer(t, svc) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + stream, err := client.Subscribe(ctx, &pb.BlobServiceSubscribeRequest{ + Namespace: ns[:], + }) + if err != nil { + t.Fatalf("Subscribe: %v", err) + } + + // Wait for server-side subscription to be established. 
+ deadline := time.After(5 * time.Second) + for notifier.SubscriberCount() == 0 { + select { + case <-deadline: + t.Fatal("timed out waiting for subscriber registration") + default: + time.Sleep(10 * time.Millisecond) + } + } + + // Publish an event. + notifier.Publish(api.HeightEvent{ + Height: 1, + Header: &types.Header{Height: 1}, + Blobs: []types.Blob{ + {Height: 1, Namespace: ns, Data: []byte("d1"), Index: 0}, + }, + }) + + ev, err := stream.Recv() + if err != nil { + t.Fatalf("Recv: %v", err) + } + if ev.Height != 1 { + t.Errorf("Height = %d, want 1", ev.Height) + } + if len(ev.Blobs) != 1 { + t.Errorf("Blobs = %d, want 1", len(ev.Blobs)) + } +} diff --git a/pkg/api/jsonrpc/blob.go b/pkg/api/jsonrpc/blob.go new file mode 100644 index 0000000..87b2e50 --- /dev/null +++ b/pkg/api/jsonrpc/blob.go @@ -0,0 +1,103 @@ +package jsonrpc + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/evstack/apex/pkg/api" + "github.com/evstack/apex/pkg/types" +) + +// BlobHandler implements the celestia-node blob JSON-RPC namespace. +type BlobHandler struct { + svc *api.Service +} + +// Get returns a single blob matching the namespace and commitment at the given height. +func (h *BlobHandler) Get(ctx context.Context, height uint64, namespace []byte, commitment []byte) (json.RawMessage, error) { + ns, err := bytesToNamespace(namespace) + if err != nil { + return nil, err + } + return h.svc.BlobGet(ctx, height, ns, commitment) +} + +// GetAll returns all blobs for the given namespaces at the given height. +func (h *BlobHandler) GetAll(ctx context.Context, height uint64, namespaces [][]byte) (json.RawMessage, error) { + nsList := make([]types.Namespace, len(namespaces)) + for i, nsBytes := range namespaces { + ns, err := bytesToNamespace(nsBytes) + if err != nil { + return nil, err + } + nsList[i] = ns + } + return h.svc.BlobGetAll(ctx, height, nsList, 0, 0) +} + +// Subscribe returns a channel of blob events for the given namespace. +// Only available over WebSocket. 
+func (h *BlobHandler) Subscribe(ctx context.Context, namespace []byte) (<-chan json.RawMessage, error) { + ns, err := bytesToNamespace(namespace) + if err != nil { + return nil, err + } + + sub := h.svc.BlobSubscribe(ns) + out := make(chan json.RawMessage, cap(sub.Events())) + + go func() { + defer close(out) + defer h.svc.Notifier().Unsubscribe(sub) + for { + select { + case <-ctx.Done(): + return + case ev, ok := <-sub.Events(): + if !ok { + return + } + for i := range ev.Blobs { + raw := api.MarshalBlob(&ev.Blobs[i]) + select { + case out <- raw: + case <-ctx.Done(): + return + } + } + } + } + }() + + return out, nil +} + +// GetProof forwards a proof request to the upstream Celestia node. +func (h *BlobHandler) GetProof(ctx context.Context, height uint64, namespace, commitment []byte) (json.RawMessage, error) { + return h.svc.BlobGetProof(ctx, height, namespace, commitment) +} + +// Included forwards an inclusion check to the upstream Celestia node. +func (h *BlobHandler) Included(ctx context.Context, height uint64, namespace []byte, proof json.RawMessage, commitment []byte) (bool, error) { + return h.svc.BlobIncluded(ctx, height, namespace, proof, commitment) +} + +// GetCommitmentProof is not supported by the indexer. +func (h *BlobHandler) GetCommitmentProof(_ context.Context, _ uint64, _ []byte, _ []byte) (json.RawMessage, error) { + return nil, errNotSupported +} + +// Submit is not supported — apex is read-only. 
+func (h *BlobHandler) Submit(_ context.Context, _ json.RawMessage, _ json.RawMessage) (json.RawMessage, error) { + return nil, errReadOnly +} + +func bytesToNamespace(b []byte) (types.Namespace, error) { + if len(b) != types.NamespaceSize { + return types.Namespace{}, fmt.Errorf("invalid namespace size: got %d, want %d", len(b), types.NamespaceSize) + } + var ns types.Namespace + copy(ns[:], b) + return ns, nil +} diff --git a/pkg/api/jsonrpc/header.go b/pkg/api/jsonrpc/header.go new file mode 100644 index 0000000..34eef96 --- /dev/null +++ b/pkg/api/jsonrpc/header.go @@ -0,0 +1,59 @@ +package jsonrpc + +import ( + "context" + "encoding/json" + + "github.com/evstack/apex/pkg/api" +) + +// HeaderHandler implements the celestia-node header JSON-RPC namespace. +type HeaderHandler struct { + svc *api.Service +} + +// GetByHeight returns the raw header JSON at the given height. +func (h *HeaderHandler) GetByHeight(ctx context.Context, height uint64) (json.RawMessage, error) { + return h.svc.HeaderGetByHeight(ctx, height) +} + +// LocalHead returns the header at the latest synced height. +func (h *HeaderHandler) LocalHead(ctx context.Context) (json.RawMessage, error) { + return h.svc.HeaderLocalHead(ctx) +} + +// NetworkHead returns the current network head from the upstream node. +func (h *HeaderHandler) NetworkHead(ctx context.Context) (json.RawMessage, error) { + return h.svc.HeaderNetworkHead(ctx) +} + +// Subscribe returns a channel of header events. +// Only available over WebSocket. 
+func (h *HeaderHandler) Subscribe(ctx context.Context) (<-chan json.RawMessage, error) { + sub := h.svc.HeaderSubscribe() + out := make(chan json.RawMessage, cap(sub.Events())) + + go func() { + defer close(out) + defer h.svc.Notifier().Unsubscribe(sub) + for { + select { + case <-ctx.Done(): + return + case ev, ok := <-sub.Events(): + if !ok { + return + } + if ev.Header != nil && ev.Header.RawHeader != nil { + select { + case out <- json.RawMessage(ev.Header.RawHeader): + case <-ctx.Done(): + return + } + } + } + } + }() + + return out, nil +} diff --git a/pkg/api/jsonrpc/server.go b/pkg/api/jsonrpc/server.go new file mode 100644 index 0000000..3a57efb --- /dev/null +++ b/pkg/api/jsonrpc/server.go @@ -0,0 +1,24 @@ +package jsonrpc + +import ( + gorpc "github.com/filecoin-project/go-jsonrpc" + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/api" +) + +// NewServer creates a JSON-RPC server with celestia-node compatible method +// names. The returned server implements http.Handler and supports both +// HTTP and WebSocket connections. +func NewServer(svc *api.Service, log zerolog.Logger) *gorpc.RPCServer { + srv := gorpc.NewServer() + + srv.Register("blob", &BlobHandler{svc: svc}) + srv.Register("header", &HeaderHandler{svc: svc}) + srv.Register("share", &ShareStub{}) + srv.Register("fraud", &FraudStub{}) + srv.Register("blobstream", &BlobstreamStub{}) + + log.Info().Msg("JSON-RPC server initialized") + return srv +} diff --git a/pkg/api/jsonrpc/server_test.go b/pkg/api/jsonrpc/server_test.go new file mode 100644 index 0000000..21fbe3c --- /dev/null +++ b/pkg/api/jsonrpc/server_test.go @@ -0,0 +1,290 @@ +package jsonrpc + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/api" + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// mockStore for JSON-RPC handler tests. 
+type mockStore struct { + headers map[uint64]*types.Header + blobs map[uint64][]types.Blob + syncState *types.SyncStatus +} + +func newMockStore() *mockStore { + return &mockStore{ + headers: make(map[uint64]*types.Header), + blobs: make(map[uint64][]types.Blob), + } +} + +func (m *mockStore) PutBlobs(_ context.Context, blobs []types.Blob) error { + for _, b := range blobs { + m.blobs[b.Height] = append(m.blobs[b.Height], b) + } + return nil +} + +func (m *mockStore) GetBlob(_ context.Context, ns types.Namespace, height uint64, index int) (*types.Blob, error) { + for _, b := range m.blobs[height] { + if b.Namespace == ns && b.Index == index { + return &b, nil + } + } + return nil, store.ErrNotFound +} + +func (m *mockStore) GetBlobs(_ context.Context, ns types.Namespace, startHeight, endHeight uint64, _, _ int) ([]types.Blob, error) { + var result []types.Blob + for h := startHeight; h <= endHeight; h++ { + for _, b := range m.blobs[h] { + if b.Namespace == ns { + result = append(result, b) + } + } + } + return result, nil +} + +func (m *mockStore) PutHeader(_ context.Context, h *types.Header) error { + m.headers[h.Height] = h + return nil +} + +func (m *mockStore) GetHeader(_ context.Context, height uint64) (*types.Header, error) { + h, ok := m.headers[height] + if !ok { + return nil, store.ErrNotFound + } + return h, nil +} + +func (m *mockStore) PutNamespace(_ context.Context, _ types.Namespace) error { return nil } +func (m *mockStore) GetNamespaces(_ context.Context) ([]types.Namespace, error) { return nil, nil } + +func (m *mockStore) GetSyncState(_ context.Context) (*types.SyncStatus, error) { + if m.syncState == nil { + return nil, store.ErrNotFound + } + return m.syncState, nil +} + +func (m *mockStore) SetSyncState(_ context.Context, s types.SyncStatus) error { + m.syncState = &s + return nil +} + +func (m *mockStore) Close() error { return nil } + +type mockFetcher struct { + networkHead *types.Header +} + +func (f *mockFetcher) GetHeader(_ 
context.Context, _ uint64) (*types.Header, error) { + return nil, errors.New("not implemented") +} + +func (f *mockFetcher) GetBlobs(_ context.Context, _ uint64, _ []types.Namespace) ([]types.Blob, error) { + return nil, nil +} + +func (f *mockFetcher) GetNetworkHead(_ context.Context) (*types.Header, error) { + if f.networkHead == nil { + return nil, errors.New("no network head") + } + return f.networkHead, nil +} + +func (f *mockFetcher) SubscribeHeaders(_ context.Context) (<-chan *types.Header, error) { + return make(chan *types.Header), nil +} + +func (f *mockFetcher) Close() error { return nil } + +func testNamespace(b byte) types.Namespace { + var ns types.Namespace + ns[types.NamespaceSize-1] = b + return ns +} + +type jsonRPCRequest struct { + Jsonrpc string `json:"jsonrpc"` + Method string `json:"method"` + Params []any `json:"params"` + ID int `json:"id"` +} + +type jsonRPCResponse struct { + Jsonrpc string `json:"jsonrpc"` + Result json.RawMessage `json:"result"` + Error *jsonRPCError `json:"error,omitempty"` + ID int `json:"id"` +} + +type jsonRPCError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func doRPC(t *testing.T, srv http.Handler, method string, params ...any) jsonRPCResponse { + t.Helper() + + if params == nil { + params = []any{} + } + + req := jsonRPCRequest{ + Jsonrpc: "2.0", + Method: method, + Params: params, + ID: 1, + } + body, err := json.Marshal(req) + if err != nil { + t.Fatalf("marshal request: %v", err) + } + + httpReq := httptest.NewRequest(http.MethodPost, "/", bytes.NewReader(body)) + httpReq.Header.Set("Content-Type", "application/json") + w := httptest.NewRecorder() + srv.ServeHTTP(w, httpReq) + + resp := w.Result() + defer resp.Body.Close() //nolint:errcheck + respBody, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("read response: %v", err) + } + + var rpcResp jsonRPCResponse + if err := json.Unmarshal(respBody, &rpcResp); err != nil { + t.Fatalf("unmarshal response: %v (body: %s)", 
err, respBody) + } + return rpcResp +} + +func TestJSONRPCHeaderGetByHeight(t *testing.T) { + st := newMockStore() + st.headers[42] = &types.Header{ + Height: 42, + RawHeader: []byte(`{"height":"42"}`), + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + srv := NewServer(svc, zerolog.Nop()) + + resp := doRPC(t, srv, "header.GetByHeight", uint64(42)) + if resp.Error != nil { + t.Fatalf("RPC error: %s", resp.Error.Message) + } + + // Result should be the raw header JSON. + if string(resp.Result) != `{"height":"42"}` { + t.Errorf("result = %s, want raw header", resp.Result) + } +} + +func TestJSONRPCHeaderLocalHead(t *testing.T) { + st := newMockStore() + st.syncState = &types.SyncStatus{LatestHeight: 100} + st.headers[100] = &types.Header{ + Height: 100, + RawHeader: []byte(`{"height":"100"}`), + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + srv := NewServer(svc, zerolog.Nop()) + + resp := doRPC(t, srv, "header.LocalHead") + if resp.Error != nil { + t.Fatalf("RPC error: %s", resp.Error.Message) + } + if string(resp.Result) != `{"height":"100"}` { + t.Errorf("result = %s", resp.Result) + } +} + +func TestJSONRPCHeaderNetworkHead(t *testing.T) { + ft := &mockFetcher{ + networkHead: &types.Header{ + Height: 200, + RawHeader: []byte(`{"height":"200"}`), + }, + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(newMockStore(), ft, nil, notifier, zerolog.Nop()) + srv := NewServer(svc, zerolog.Nop()) + + resp := doRPC(t, srv, "header.NetworkHead") + if resp.Error != nil { + t.Fatalf("RPC error: %s", resp.Error.Message) + } + if string(resp.Result) != `{"height":"200"}` { + t.Errorf("result = %s", resp.Result) + } +} + +func TestJSONRPCBlobGetAll(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + st.blobs[10] = []types.Blob{ + {Height: 10, Namespace: ns, Data: []byte("d1"), 
Commitment: []byte("c1"), Index: 0}, + } + + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(st, &mockFetcher{}, nil, notifier, zerolog.Nop()) + srv := NewServer(svc, zerolog.Nop()) + + resp := doRPC(t, srv, "blob.GetAll", uint64(10), [][]byte{ns[:]}) + if resp.Error != nil { + t.Fatalf("RPC error: %s", resp.Error.Message) + } + + var blobs []json.RawMessage + if err := json.Unmarshal(resp.Result, &blobs); err != nil { + t.Fatalf("unmarshal blobs: %v", err) + } + if len(blobs) != 1 { + t.Errorf("got %d blobs, want 1", len(blobs)) + } +} + +func TestJSONRPCStubMethods(t *testing.T) { + notifier := api.NewNotifier(16, zerolog.Nop()) + svc := api.NewService(newMockStore(), &mockFetcher{}, nil, notifier, zerolog.Nop()) + srv := NewServer(svc, zerolog.Nop()) + + tests := []struct { + method string + params []any + }{ + {"share.GetShare", []any{uint64(1), 0, 0}}, + {"share.GetEDS", []any{uint64(1)}}, + {"fraud.Get", []any{"befp"}}, + } + + for _, tt := range tests { + t.Run(tt.method, func(t *testing.T) { + resp := doRPC(t, srv, tt.method, tt.params...) + if resp.Error == nil { + t.Error("expected error from stub method") + } + }) + } +} diff --git a/pkg/api/jsonrpc/stubs.go b/pkg/api/jsonrpc/stubs.go new file mode 100644 index 0000000..1c2ff98 --- /dev/null +++ b/pkg/api/jsonrpc/stubs.go @@ -0,0 +1,56 @@ +package jsonrpc + +import ( + "context" + "encoding/json" + "fmt" +) + +var ( + errNotSupported = fmt.Errorf("method not supported by apex indexer") + errReadOnly = fmt.Errorf("apex is a read-only indexer, blob submission not supported") +) + +// ShareStub holds stub methods for the share namespace. +type ShareStub struct{} + +// GetShare is not supported. +func (s *ShareStub) GetShare(_ context.Context, _ uint64, _ int, _ int) (json.RawMessage, error) { + return nil, errNotSupported +} + +// GetEDS is not supported. 
+func (s *ShareStub) GetEDS(_ context.Context, _ uint64) (json.RawMessage, error) { + return nil, errNotSupported +} + +// GetRange is not supported. +func (s *ShareStub) GetRange(_ context.Context, _ uint64, _ int, _ int) (json.RawMessage, error) { + return nil, errNotSupported +} + +// FraudStub holds stub methods for the fraud namespace. +type FraudStub struct{} + +// Get is not supported. +func (s *FraudStub) Get(_ context.Context, _ string) (json.RawMessage, error) { + return nil, errNotSupported +} + +// Subscribe is not supported. +func (s *FraudStub) Subscribe(_ context.Context, _ string) (<-chan json.RawMessage, error) { + return nil, errNotSupported +} + +// BlobstreamStub holds stub methods for the blobstream namespace. +type BlobstreamStub struct{} + +// GetDataRootTupleRoot is not supported. +func (s *BlobstreamStub) GetDataRootTupleRoot(_ context.Context, _ uint64, _ uint64) (json.RawMessage, error) { + return nil, errNotSupported +} + +// GetDataRootTupleInclusionProof is not supported. +func (s *BlobstreamStub) GetDataRootTupleInclusionProof(_ context.Context, _ uint64, _ uint64, _ uint64) (json.RawMessage, error) { + return nil, errNotSupported +} diff --git a/pkg/api/notifier.go b/pkg/api/notifier.go new file mode 100644 index 0000000..a8b5e23 --- /dev/null +++ b/pkg/api/notifier.go @@ -0,0 +1,158 @@ +package api + +import ( + "sync" + "sync/atomic" + + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/types" +) + +// HeightEvent is published when a new height is processed. +type HeightEvent struct { + Height uint64 + Header *types.Header + Blobs []types.Blob +} + +// Subscription receives height events from the Notifier. +type Subscription struct { + id uint64 + ch chan HeightEvent + namespaces map[types.Namespace]struct{} // empty = all namespaces + lastHeight uint64 +} + +// Events returns the channel on which events are delivered. 
+func (s *Subscription) Events() <-chan HeightEvent {
+	return s.ch
+}
+
+// Notifier fans out height events to subscribed API clients.
+type Notifier struct {
+	mu          sync.RWMutex // guards subscribers and each Subscription's lastHeight
+	subscribers map[uint64]*Subscription
+	nextID      atomic.Uint64
+	bufferSize  int
+	log         zerolog.Logger
+}
+
+// NewNotifier creates a Notifier with the given per-subscriber buffer size.
+func NewNotifier(bufferSize int, log zerolog.Logger) *Notifier {
+	if bufferSize <= 0 {
+		bufferSize = 64 // sane default when config leaves the buffer size unset
+	}
+	return &Notifier{
+		subscribers: make(map[uint64]*Subscription),
+		bufferSize:  bufferSize,
+		log:         log.With().Str("component", "notifier").Logger(),
+	}
+}
+
+// Subscribe creates a new subscription. If namespaces is empty, all blobs are
+// delivered. The returned Subscription must be cleaned up via Unsubscribe.
+func (n *Notifier) Subscribe(namespaces []types.Namespace) *Subscription {
+	id := n.nextID.Add(1)
+	nsSet := make(map[types.Namespace]struct{}, len(namespaces))
+	for _, ns := range namespaces {
+		nsSet[ns] = struct{}{}
+	}
+
+	sub := &Subscription{
+		id:         id,
+		ch:         make(chan HeightEvent, n.bufferSize),
+		namespaces: nsSet,
+	}
+
+	n.mu.Lock()
+	n.subscribers[id] = sub
+	n.mu.Unlock()
+
+	n.log.Debug().Uint64("sub_id", id).Int("namespaces", len(namespaces)).Msg("new subscription")
+	return sub
+}
+
+// Unsubscribe removes a subscription and closes its channel. It is safe to
+// call more than once; repeated calls are no-ops.
+func (n *Notifier) Unsubscribe(sub *Subscription) {
+	n.mu.Lock()
+	if _, ok := n.subscribers[sub.id]; ok { // guard makes double-unsubscribe a no-op (no double close)
+		delete(n.subscribers, sub.id)
+		close(sub.ch)
+	}
+	n.mu.Unlock()
+}
+
+// SubscriberCount returns the current number of active subscribers.
+func (n *Notifier) SubscriberCount() int {
+	n.mu.RLock()
+	defer n.mu.RUnlock()
+	return len(n.subscribers)
+}
+
+// Publish sends an event to all matching subscribers. Non-blocking: if a
+// subscriber's buffer is full the event is dropped and gap tracking resets.
+func (n *Notifier) Publish(event HeightEvent) {
+	n.mu.Lock() // exclusive lock: sub.lastHeight is mutated below
+	defer n.mu.Unlock()
+
+	for _, sub := range n.subscribers {
+		filtered := n.filterEvent(event, sub)
+
+		// Warn at 75% buffer capacity.
+		usage := len(sub.ch)
+		threshold := n.bufferSize * 3 / 4
+		if usage >= threshold {
+			n.log.Warn().
+				Uint64("sub_id", sub.id).
+				Int("buffered", usage).
+				Int("capacity", n.bufferSize).
+				Msg("subscription buffer near capacity")
+		}
+
+		// Check contiguity: if lastHeight is set and this isn't the next height,
+		// log a gap warning.
+		if sub.lastHeight > 0 && event.Height != sub.lastHeight+1 {
+			n.log.Warn().
+				Uint64("sub_id", sub.id).
+				Uint64("expected", sub.lastHeight+1).
+				Uint64("got", event.Height).
+				Msg("non-contiguous event delivery")
+		}
+
+		// Non-blocking send.
+		select {
+		case sub.ch <- filtered:
+			sub.lastHeight = event.Height
+		default:
+			n.log.Warn().
+				Uint64("sub_id", sub.id).
+				Uint64("height", event.Height).
+				Msg("subscriber buffer full, event dropped")
+			// Zero lastHeight so the contiguity check (which requires lastHeight > 0)
+			// stays silent on the next delivery — the drop itself was logged above.
+			sub.lastHeight = 0
+		}
+	}
+}
+
+// filterEvent returns an event with blobs filtered to the subscriber's
+// namespace set. If the subscriber watches all namespaces, the event is
+// returned as-is.
+func (n *Notifier) filterEvent(event HeightEvent, sub *Subscription) HeightEvent { + if len(sub.namespaces) == 0 { + return event + } + + filtered := make([]types.Blob, 0, len(event.Blobs)) + for i := range event.Blobs { + if _, ok := sub.namespaces[event.Blobs[i].Namespace]; ok { + filtered = append(filtered, event.Blobs[i]) + } + } + + return HeightEvent{ + Height: event.Height, + Header: event.Header, + Blobs: filtered, + } +} diff --git a/pkg/api/notifier_test.go b/pkg/api/notifier_test.go new file mode 100644 index 0000000..2574139 --- /dev/null +++ b/pkg/api/notifier_test.go @@ -0,0 +1,186 @@ +package api + +import ( + "testing" + "time" + + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/types" +) + +func testNamespace(b byte) types.Namespace { + var ns types.Namespace + ns[types.NamespaceSize-1] = b + return ns +} + +func makeEvent(height uint64, namespaces ...types.Namespace) HeightEvent { + blobs := make([]types.Blob, len(namespaces)) + for i, ns := range namespaces { + blobs[i] = types.Blob{ + Height: height, + Namespace: ns, + Data: []byte("data"), + Index: i, + } + } + return HeightEvent{ + Height: height, + Header: &types.Header{Height: height}, + Blobs: blobs, + } +} + +func TestNotifierSubscribePublish(t *testing.T) { + n := NewNotifier(16, zerolog.Nop()) + sub := n.Subscribe(nil) // all namespaces + defer n.Unsubscribe(sub) + + event := makeEvent(1, testNamespace(1), testNamespace(2)) + n.Publish(event) + + select { + case got := <-sub.Events(): + if got.Height != 1 { + t.Errorf("Height = %d, want 1", got.Height) + } + if len(got.Blobs) != 2 { + t.Errorf("Blobs = %d, want 2", len(got.Blobs)) + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } +} + +func TestNotifierNamespaceFilter(t *testing.T) { + n := NewNotifier(16, zerolog.Nop()) + ns1 := testNamespace(1) + ns2 := testNamespace(2) + + sub := n.Subscribe([]types.Namespace{ns1}) + defer n.Unsubscribe(sub) + + event := makeEvent(1, ns1, ns2) + 
n.Publish(event) + + select { + case got := <-sub.Events(): + if len(got.Blobs) != 1 { + t.Fatalf("Blobs = %d, want 1", len(got.Blobs)) + } + if got.Blobs[0].Namespace != ns1 { + t.Errorf("Blob namespace = %s, want %s", got.Blobs[0].Namespace, ns1) + } + case <-time.After(time.Second): + t.Fatal("timed out waiting for event") + } +} + +func TestNotifierMultipleSubscribers(t *testing.T) { + n := NewNotifier(16, zerolog.Nop()) + sub1 := n.Subscribe(nil) + sub2 := n.Subscribe(nil) + defer n.Unsubscribe(sub1) + defer n.Unsubscribe(sub2) + + n.Publish(makeEvent(1)) + + for i, sub := range []*Subscription{sub1, sub2} { + select { + case got := <-sub.Events(): + if got.Height != 1 { + t.Errorf("sub%d: Height = %d, want 1", i, got.Height) + } + case <-time.After(time.Second): + t.Fatalf("sub%d: timed out", i) + } + } +} + +func TestNotifierBufferOverflow(t *testing.T) { + n := NewNotifier(2, zerolog.Nop()) + sub := n.Subscribe(nil) + defer n.Unsubscribe(sub) + + // Fill buffer. + n.Publish(makeEvent(1)) + n.Publish(makeEvent(2)) + // This should be dropped (non-blocking). + n.Publish(makeEvent(3)) + + // Drain and verify we got 1 and 2. + got1 := <-sub.Events() + got2 := <-sub.Events() + if got1.Height != 1 || got2.Height != 2 { + t.Errorf("got heights %d, %d; want 1, 2", got1.Height, got2.Height) + } + + // Channel should be empty now. + select { + case ev := <-sub.Events(): + t.Fatalf("unexpected event: height %d", ev.Height) + default: + // expected + } +} + +func TestNotifierUnsubscribe(t *testing.T) { + n := NewNotifier(16, zerolog.Nop()) + sub := n.Subscribe(nil) + + n.Unsubscribe(sub) + + // Channel should be closed. + _, ok := <-sub.Events() + if ok { + t.Fatal("expected channel to be closed after unsubscribe") + } + + // Double unsubscribe should not panic. + n.Unsubscribe(sub) +} + +func TestNotifierContiguityTracking(t *testing.T) { + // Verify that after a buffer overflow, lastHeight is reset. + // Next delivery should succeed without panic. 
+ n := NewNotifier(1, zerolog.Nop()) + sub := n.Subscribe(nil) + defer n.Unsubscribe(sub) + + n.Publish(makeEvent(1)) + // Buffer full, event 2 dropped. + n.Publish(makeEvent(2)) + // Drain. + <-sub.Events() + // Event 3 should deliver fine even though 2 was dropped. + n.Publish(makeEvent(3)) + + select { + case got := <-sub.Events(): + if got.Height != 3 { + t.Errorf("Height = %d, want 3", got.Height) + } + case <-time.After(time.Second): + t.Fatal("timed out") + } +} + +func TestNotifierEmptyNamespaceSetDeliversAll(t *testing.T) { + n := NewNotifier(16, zerolog.Nop()) + sub := n.Subscribe([]types.Namespace{}) // explicit empty slice + defer n.Unsubscribe(sub) + + ns1 := testNamespace(1) + ns2 := testNamespace(2) + n.Publish(makeEvent(1, ns1, ns2)) + + select { + case got := <-sub.Events(): + if len(got.Blobs) != 2 { + t.Errorf("Blobs = %d, want 2 (empty namespace set = all)", len(got.Blobs)) + } + case <-time.After(time.Second): + t.Fatal("timed out") + } +} diff --git a/pkg/api/service.go b/pkg/api/service.go new file mode 100644 index 0000000..e8fa191 --- /dev/null +++ b/pkg/api/service.go @@ -0,0 +1,180 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/fetch" + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// Service is the shared business logic layer used by both JSON-RPC and gRPC +// handlers. It reads from the store, forwards proofs upstream, and manages +// subscriptions via the Notifier. +type Service struct { + store store.Store + fetcher fetch.DataFetcher + proof fetch.ProofForwarder + notifier *Notifier + log zerolog.Logger +} + +// NewService creates a new API service. proof may be nil if upstream proof +// forwarding is not available. 
+func NewService(s store.Store, f fetch.DataFetcher, proof fetch.ProofForwarder, n *Notifier, log zerolog.Logger) *Service { + return &Service{ + store: s, + fetcher: f, + proof: proof, + notifier: n, + log: log.With().Str("component", "api-service").Logger(), + } +} + +// BlobGet returns a single blob matching the namespace and commitment at the +// given height. Returns the blob as celestia-node compatible JSON. +func (s *Service) BlobGet(ctx context.Context, height uint64, namespace types.Namespace, commitment []byte) (json.RawMessage, error) { + blobs, err := s.store.GetBlobs(ctx, namespace, height, height, 0, 0) + if err != nil { + return nil, fmt.Errorf("get blobs: %w", err) + } + + for i := range blobs { + if bytes.Equal(blobs[i].Commitment, commitment) { + return MarshalBlob(&blobs[i]), nil + } + } + + return nil, store.ErrNotFound +} + +// BlobGetAll returns all blobs for the given namespaces at the given height. +// limit=0 means no limit; offset=0 means no offset. +// Pagination is applied to the aggregate result across all namespaces. +func (s *Service) BlobGetAll(ctx context.Context, height uint64, namespaces []types.Namespace, limit, offset int) (json.RawMessage, error) { + var allBlobs []types.Blob + for _, ns := range namespaces { + blobs, err := s.store.GetBlobs(ctx, ns, height, height, 0, 0) + if err != nil { + return nil, fmt.Errorf("get blobs for namespace %s: %w", ns, err) + } + allBlobs = append(allBlobs, blobs...) + } + + // Apply pagination to the aggregate result. 
+ if offset > 0 { + if offset >= len(allBlobs) { + allBlobs = nil + } else { + allBlobs = allBlobs[offset:] + } + } + if limit > 0 && limit < len(allBlobs) { + allBlobs = allBlobs[:limit] + } + + if len(allBlobs) == 0 { + return json.RawMessage("null"), nil + } + + result := make([]json.RawMessage, len(allBlobs)) + for i := range allBlobs { + result[i] = MarshalBlob(&allBlobs[i]) + } + + out, err := json.Marshal(result) + if err != nil { + return nil, fmt.Errorf("marshal blobs: %w", err) + } + return out, nil +} + +// BlobGetProof forwards a proof request to the upstream Celestia node. +func (s *Service) BlobGetProof(ctx context.Context, height uint64, namespace, commitment []byte) (json.RawMessage, error) { + if s.proof == nil { + return nil, fmt.Errorf("proof forwarding not available") + } + return s.proof.GetProof(ctx, height, namespace, commitment) +} + +// BlobIncluded forwards an inclusion check to the upstream Celestia node. +func (s *Service) BlobIncluded(ctx context.Context, height uint64, namespace []byte, proof json.RawMessage, commitment []byte) (bool, error) { + if s.proof == nil { + return false, fmt.Errorf("proof forwarding not available") + } + return s.proof.Included(ctx, height, namespace, proof, commitment) +} + +// BlobSubscribe creates a subscription for blobs in the given namespace. +func (s *Service) BlobSubscribe(namespace types.Namespace) *Subscription { + return s.notifier.Subscribe([]types.Namespace{namespace}) +} + +// HeaderGetByHeight returns the raw header JSON at the given height. +func (s *Service) HeaderGetByHeight(ctx context.Context, height uint64) (json.RawMessage, error) { + hdr, err := s.store.GetHeader(ctx, height) + if err != nil { + return nil, fmt.Errorf("get header: %w", err) + } + return hdr.RawHeader, nil +} + +// HeaderLocalHead returns the header at the latest synced height. 
+func (s *Service) HeaderLocalHead(ctx context.Context) (json.RawMessage, error) { + ss, err := s.store.GetSyncState(ctx) + if err != nil { + return nil, fmt.Errorf("get sync state: %w", err) + } + hdr, err := s.store.GetHeader(ctx, ss.LatestHeight) + if err != nil { + return nil, fmt.Errorf("get header at height %d: %w", ss.LatestHeight, err) + } + return hdr.RawHeader, nil +} + +// HeaderNetworkHead returns the current network head from the upstream node. +func (s *Service) HeaderNetworkHead(ctx context.Context) (json.RawMessage, error) { + hdr, err := s.fetcher.GetNetworkHead(ctx) + if err != nil { + return nil, fmt.Errorf("get network head: %w", err) + } + return hdr.RawHeader, nil +} + +// HeaderSubscribe creates a subscription for all new headers. +func (s *Service) HeaderSubscribe() *Subscription { + return s.notifier.Subscribe(nil) +} + +// Notifier returns the service's notifier for direct access. +func (s *Service) Notifier() *Notifier { + return s.notifier +} + +// Store returns the underlying store for direct access. +func (s *Service) Store() store.Store { + return s.store +} + +// Fetcher returns the underlying fetcher for direct access. +func (s *Service) Fetcher() fetch.DataFetcher { + return s.fetcher +} + +// MarshalBlob converts a stored blob into celestia-node compatible JSON. +func MarshalBlob(b *types.Blob) json.RawMessage { + m := map[string]any{ + "namespace": b.Namespace[:], + "data": b.Data, + "share_version": b.ShareVersion, + "commitment": b.Commitment, + "index": b.Index, + } + raw, _ := json.Marshal(m) //nolint:errcheck + return raw +} diff --git a/pkg/api/service_test.go b/pkg/api/service_test.go new file mode 100644 index 0000000..04ff6d0 --- /dev/null +++ b/pkg/api/service_test.go @@ -0,0 +1,260 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "testing" + + "github.com/rs/zerolog" + + "github.com/evstack/apex/pkg/store" + "github.com/evstack/apex/pkg/types" +) + +// mockStore for service tests. 
+type mockStore struct { + headers map[uint64]*types.Header + blobs map[uint64][]types.Blob + syncState *types.SyncStatus +} + +func newMockStore() *mockStore { + return &mockStore{ + headers: make(map[uint64]*types.Header), + blobs: make(map[uint64][]types.Blob), + } +} + +func (m *mockStore) PutBlobs(_ context.Context, blobs []types.Blob) error { + for _, b := range blobs { + m.blobs[b.Height] = append(m.blobs[b.Height], b) + } + return nil +} + +func (m *mockStore) GetBlob(_ context.Context, ns types.Namespace, height uint64, index int) (*types.Blob, error) { + for _, b := range m.blobs[height] { + if b.Namespace == ns && b.Index == index { + return &b, nil + } + } + return nil, store.ErrNotFound +} + +func (m *mockStore) GetBlobs(_ context.Context, ns types.Namespace, startHeight, endHeight uint64, _, _ int) ([]types.Blob, error) { + var result []types.Blob + for h := startHeight; h <= endHeight; h++ { + for _, b := range m.blobs[h] { + if b.Namespace == ns { + result = append(result, b) + } + } + } + return result, nil +} + +func (m *mockStore) PutHeader(_ context.Context, h *types.Header) error { + m.headers[h.Height] = h + return nil +} + +func (m *mockStore) GetHeader(_ context.Context, height uint64) (*types.Header, error) { + h, ok := m.headers[height] + if !ok { + return nil, store.ErrNotFound + } + return h, nil +} + +func (m *mockStore) PutNamespace(_ context.Context, _ types.Namespace) error { return nil } + +func (m *mockStore) GetNamespaces(_ context.Context) ([]types.Namespace, error) { + return nil, nil +} + +func (m *mockStore) GetSyncState(_ context.Context) (*types.SyncStatus, error) { + if m.syncState == nil { + return nil, store.ErrNotFound + } + return m.syncState, nil +} + +func (m *mockStore) SetSyncState(_ context.Context, s types.SyncStatus) error { + m.syncState = &s + return nil +} + +func (m *mockStore) Close() error { return nil } + +// mockFetcher for service tests. 
+type mockFetcher struct { + networkHead *types.Header +} + +func (m *mockFetcher) GetHeader(_ context.Context, _ uint64) (*types.Header, error) { + return nil, errors.New("not implemented") +} + +func (m *mockFetcher) GetBlobs(_ context.Context, _ uint64, _ []types.Namespace) ([]types.Blob, error) { + return nil, nil +} + +func (m *mockFetcher) GetNetworkHead(_ context.Context) (*types.Header, error) { + if m.networkHead == nil { + return nil, errors.New("no network head") + } + return m.networkHead, nil +} + +func (m *mockFetcher) SubscribeHeaders(_ context.Context) (<-chan *types.Header, error) { + return make(chan *types.Header), nil +} + +func (m *mockFetcher) Close() error { return nil } + +func TestServiceBlobGet(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + commitment := []byte("c1") + + st.blobs[10] = []types.Blob{ + {Height: 10, Namespace: ns, Data: []byte("d1"), Commitment: commitment, Index: 0}, + {Height: 10, Namespace: ns, Data: []byte("d2"), Commitment: []byte("c2"), Index: 1}, + } + + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.BlobGet(context.Background(), 10, ns, commitment) + if err != nil { + t.Fatalf("BlobGet: %v", err) + } + + var m map[string]json.RawMessage + if err := json.Unmarshal(raw, &m); err != nil { + t.Fatalf("unmarshal blob: %v", err) + } + if _, ok := m["commitment"]; !ok { + t.Error("blob JSON missing 'commitment' field") + } +} + +func TestServiceBlobGetNotFound(t *testing.T) { + st := newMockStore() + ns := testNamespace(1) + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + _, err := svc.BlobGet(context.Background(), 10, ns, []byte("missing")) + if !errors.Is(err, store.ErrNotFound) { + t.Fatalf("expected ErrNotFound, got %v", err) + } +} + +func TestServiceBlobGetAll(t *testing.T) { + st := newMockStore() + ns1 := testNamespace(1) + ns2 := testNamespace(2) + + st.blobs[10] = []types.Blob{ + {Height: 
10, Namespace: ns1, Data: []byte("d1"), Commitment: []byte("c1"), Index: 0}, + {Height: 10, Namespace: ns2, Data: []byte("d2"), Commitment: []byte("c2"), Index: 0}, + } + + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.BlobGetAll(context.Background(), 10, []types.Namespace{ns1, ns2}, 0, 0) + if err != nil { + t.Fatalf("BlobGetAll: %v", err) + } + + var blobs []json.RawMessage + if err := json.Unmarshal(raw, &blobs); err != nil { + t.Fatalf("unmarshal blobs: %v", err) + } + if len(blobs) != 2 { + t.Errorf("got %d blobs, want 2", len(blobs)) + } +} + +func TestServiceBlobGetAllEmpty(t *testing.T) { + st := newMockStore() + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.BlobGetAll(context.Background(), 10, []types.Namespace{testNamespace(1)}, 0, 0) + if err != nil { + t.Fatalf("BlobGetAll: %v", err) + } + if string(raw) != "null" { + t.Errorf("expected null for empty blobs, got %s", raw) + } +} + +func TestServiceHeaderGetByHeight(t *testing.T) { + st := newMockStore() + st.headers[42] = &types.Header{ + Height: 42, + RawHeader: []byte(`{"height":"42"}`), + } + + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.HeaderGetByHeight(context.Background(), 42) + if err != nil { + t.Fatalf("HeaderGetByHeight: %v", err) + } + if string(raw) != `{"height":"42"}` { + t.Errorf("got %s, want raw header JSON", raw) + } +} + +func TestServiceHeaderLocalHead(t *testing.T) { + st := newMockStore() + st.syncState = &types.SyncStatus{LatestHeight: 100} + st.headers[100] = &types.Header{ + Height: 100, + RawHeader: []byte(`{"height":"100"}`), + } + + svc := NewService(st, &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.HeaderLocalHead(context.Background()) + if err != nil { + t.Fatalf("HeaderLocalHead: %v", err) + } + if string(raw) != `{"height":"100"}` { + 
t.Errorf("got %s", raw) + } +} + +func TestServiceHeaderNetworkHead(t *testing.T) { + ft := &mockFetcher{ + networkHead: &types.Header{ + Height: 200, + RawHeader: []byte(`{"height":"200"}`), + }, + } + + svc := NewService(newMockStore(), ft, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + raw, err := svc.HeaderNetworkHead(context.Background()) + if err != nil { + t.Fatalf("HeaderNetworkHead: %v", err) + } + if string(raw) != `{"height":"200"}` { + t.Errorf("got %s", raw) + } +} + +func TestServiceProofForwardingUnavailable(t *testing.T) { + svc := NewService(newMockStore(), &mockFetcher{}, nil, NewNotifier(16, zerolog.Nop()), zerolog.Nop()) + + _, err := svc.BlobGetProof(context.Background(), 1, nil, nil) + if err == nil { + t.Fatal("expected error for nil proof forwarder") + } + + _, err = svc.BlobIncluded(context.Background(), 1, nil, nil, nil) + if err == nil { + t.Fatal("expected error for nil proof forwarder") + } +} diff --git a/pkg/fetch/celestia_node.go b/pkg/fetch/celestia_node.go index 21ce956..f7241f9 100644 --- a/pkg/fetch/celestia_node.go +++ b/pkg/fetch/celestia_node.go @@ -26,7 +26,9 @@ type headerAPI struct { // blobAPI defines the JSON-RPC stubs for the Celestia "blob" namespace. type blobAPI struct { - GetAll func(ctx context.Context, height uint64, namespaces [][]byte) (json.RawMessage, error) + GetAll func(ctx context.Context, height uint64, namespaces [][]byte) (json.RawMessage, error) + GetProof func(ctx context.Context, height uint64, namespace []byte, commitment []byte) (json.RawMessage, error) + Included func(ctx context.Context, height uint64, namespace []byte, proof json.RawMessage, commitment []byte) (bool, error) } // CelestiaNodeFetcher implements DataFetcher using a Celestia node's JSON-RPC API. @@ -127,6 +129,24 @@ func (f *CelestiaNodeFetcher) SubscribeHeaders(ctx context.Context) (<-chan *typ return out, nil } +// GetProof forwards a blob proof request to the upstream Celestia node. 
+func (f *CelestiaNodeFetcher) GetProof(ctx context.Context, height uint64, namespace, commitment []byte) (json.RawMessage, error) { + raw, err := f.blob.GetProof(ctx, height, namespace, commitment) + if err != nil { + return nil, fmt.Errorf("blob.GetProof(%d): %w", height, err) + } + return raw, nil +} + +// Included forwards a blob inclusion check to the upstream Celestia node. +func (f *CelestiaNodeFetcher) Included(ctx context.Context, height uint64, namespace []byte, proof json.RawMessage, commitment []byte) (bool, error) { + ok, err := f.blob.Included(ctx, height, namespace, proof, commitment) + if err != nil { + return false, fmt.Errorf("blob.Included(%d): %w", height, err) + } + return ok, nil +} + func (f *CelestiaNodeFetcher) Close() error { f.mu.Lock() defer f.mu.Unlock() diff --git a/pkg/fetch/fetcher.go b/pkg/fetch/fetcher.go index 9b63ec3..ac03e37 100644 --- a/pkg/fetch/fetcher.go +++ b/pkg/fetch/fetcher.go @@ -2,6 +2,7 @@ package fetch import ( "context" + "encoding/json" "github.com/evstack/apex/pkg/types" ) @@ -19,3 +20,9 @@ type DataFetcher interface { Close() error } + +// ProofForwarder forwards proof-related requests to an upstream Celestia node. +type ProofForwarder interface { + GetProof(ctx context.Context, height uint64, namespace []byte, commitment []byte) (json.RawMessage, error) + Included(ctx context.Context, height uint64, namespace []byte, proof json.RawMessage, commitment []byte) (bool, error) +} diff --git a/pkg/store/sqlite.go b/pkg/store/sqlite.go index 60aea2e..b262ae7 100644 --- a/pkg/store/sqlite.go +++ b/pkg/store/sqlite.go @@ -6,6 +6,7 @@ import ( "embed" "errors" "fmt" + "runtime" "time" "github.com/evstack/apex/pkg/types" @@ -17,48 +18,76 @@ import ( var migrations embed.FS // SQLiteStore implements Store using modernc.org/sqlite (CGo-free). +// It maintains separate read and write connection pools to the same database. 
+// The writer is limited to a single connection (WAL single-writer constraint), +// while the reader pool allows concurrent API reads. type SQLiteStore struct { - db *sql.DB + writer *sql.DB + reader *sql.DB } +// maxReadConns is the upper bound for the read connection pool. +// Beyond ~8 readers, SQLite WAL contention outweighs parallelism gains. +const maxReadConns = 8 + // Open creates or opens a SQLite database at the given path. -// The database is configured with WAL journal mode, a single connection, -// and a 5-second busy timeout. +// The read pool is sized to min(NumCPU, 8). +// The database is configured with WAL journal mode and a 5-second busy timeout. func Open(path string) (*SQLiteStore, error) { - db, err := sql.Open("sqlite", path) - if err != nil { - return nil, fmt.Errorf("open sqlite: %w", err) + poolSize := runtime.NumCPU() + if poolSize > maxReadConns { + poolSize = maxReadConns } - // TODO(phase2): split into a write pool (max 1 conn) and a read pool - // (max N conns) so API reads don't block behind sync writes. WAL mode - // supports concurrent readers alongside a single writer. 
- db.SetMaxOpenConns(1) + writer, err := sql.Open("sqlite", path) + if err != nil { + return nil, fmt.Errorf("open sqlite writer: %w", err) + } + writer.SetMaxOpenConns(1) - if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { - _ = db.Close() - return nil, fmt.Errorf("set WAL mode: %w", err) + if err := configureSQLite(writer); err != nil { + _ = writer.Close() + return nil, fmt.Errorf("configure writer: %w", err) } - if _, err := db.Exec("PRAGMA busy_timeout=5000"); err != nil { - _ = db.Close() - return nil, fmt.Errorf("set busy_timeout: %w", err) + + reader, err := sql.Open("sqlite", path) + if err != nil { + _ = writer.Close() + return nil, fmt.Errorf("open sqlite reader: %w", err) } - if _, err := db.Exec("PRAGMA foreign_keys=ON"); err != nil { - _ = db.Close() - return nil, fmt.Errorf("set foreign_keys: %w", err) + reader.SetMaxOpenConns(poolSize) + + if err := configureSQLite(reader); err != nil { + _ = writer.Close() + _ = reader.Close() + return nil, fmt.Errorf("configure reader: %w", err) } - s := &SQLiteStore{db: db} + s := &SQLiteStore{writer: writer, reader: reader} if err := s.migrate(); err != nil { - _ = db.Close() + _ = writer.Close() + _ = reader.Close() return nil, fmt.Errorf("migrate: %w", err) } return s, nil } +func configureSQLite(db *sql.DB) error { + if _, err := db.Exec("PRAGMA journal_mode=WAL"); err != nil { + return fmt.Errorf("set WAL mode: %w", err) + } + if _, err := db.Exec("PRAGMA busy_timeout=5000"); err != nil { + return fmt.Errorf("set busy_timeout: %w", err) + } + if _, err := db.Exec("PRAGMA foreign_keys=ON"); err != nil { + return fmt.Errorf("set foreign_keys: %w", err) + } + return nil +} + func (s *SQLiteStore) migrate() error { var version int - if err := s.db.QueryRow("PRAGMA user_version").Scan(&version); err != nil { + if err := s.writer.QueryRow("PRAGMA user_version").Scan(&version); err != nil { return fmt.Errorf("read user_version: %w", err) } @@ -71,7 +100,7 @@ func (s *SQLiteStore) migrate() error { 
return fmt.Errorf("read migration: %w", err) } - tx, err := s.db.Begin() + tx, err := s.writer.Begin() if err != nil { return fmt.Errorf("begin migration tx: %w", err) } @@ -92,7 +121,7 @@ func (s *SQLiteStore) PutBlobs(ctx context.Context, blobs []types.Blob) error { return nil } - tx, err := s.db.BeginTx(ctx, nil) + tx, err := s.writer.BeginTx(ctx, nil) if err != nil { return fmt.Errorf("begin tx: %w", err) } @@ -119,7 +148,7 @@ func (s *SQLiteStore) PutBlobs(ctx context.Context, blobs []types.Blob) error { } func (s *SQLiteStore) GetBlob(ctx context.Context, ns types.Namespace, height uint64, index int) (*types.Blob, error) { - row := s.db.QueryRowContext(ctx, + row := s.reader.QueryRowContext(ctx, `SELECT height, namespace, commitment, data, share_version, signer, blob_index FROM blobs WHERE namespace = ? AND height = ? AND blob_index = ?`, ns[:], height, index) @@ -127,12 +156,18 @@ func (s *SQLiteStore) GetBlob(ctx context.Context, ns types.Namespace, height ui return scanBlob(row) } -func (s *SQLiteStore) GetBlobs(ctx context.Context, ns types.Namespace, startHeight, endHeight uint64) ([]types.Blob, error) { - rows, err := s.db.QueryContext(ctx, - `SELECT height, namespace, commitment, data, share_version, signer, blob_index +func (s *SQLiteStore) GetBlobs(ctx context.Context, ns types.Namespace, startHeight, endHeight uint64, limit, offset int) ([]types.Blob, error) { + query := `SELECT height, namespace, commitment, data, share_version, signer, blob_index FROM blobs WHERE namespace = ? AND height >= ? AND height <= ? - ORDER BY height, blob_index`, - ns[:], startHeight, endHeight) + ORDER BY height, blob_index` + args := []any{ns[:], startHeight, endHeight} + + if limit > 0 { + query += ` LIMIT ? OFFSET ?` + args = append(args, limit, offset) + } + + rows, err := s.reader.QueryContext(ctx, query, args...) 
if err != nil { return nil, fmt.Errorf("query blobs: %w", err) } @@ -150,7 +185,7 @@ func (s *SQLiteStore) GetBlobs(ctx context.Context, ns types.Namespace, startHei } func (s *SQLiteStore) PutHeader(ctx context.Context, header *types.Header) error { - _, err := s.db.ExecContext(ctx, + _, err := s.writer.ExecContext(ctx, `INSERT OR IGNORE INTO headers (height, hash, data_hash, time_ns, raw_header) VALUES (?, ?, ?, ?, ?)`, header.Height, header.Hash, header.DataHash, header.Time.UnixNano(), header.RawHeader) @@ -163,7 +198,7 @@ func (s *SQLiteStore) PutHeader(ctx context.Context, header *types.Header) error func (s *SQLiteStore) GetHeader(ctx context.Context, height uint64) (*types.Header, error) { var h types.Header var timeNs int64 - err := s.db.QueryRowContext(ctx, + err := s.reader.QueryRowContext(ctx, `SELECT height, hash, data_hash, time_ns, raw_header FROM headers WHERE height = ?`, height).Scan(&h.Height, &h.Hash, &h.DataHash, &timeNs, &h.RawHeader) if err != nil { @@ -177,7 +212,7 @@ func (s *SQLiteStore) GetHeader(ctx context.Context, height uint64) (*types.Head } func (s *SQLiteStore) PutNamespace(ctx context.Context, ns types.Namespace) error { - _, err := s.db.ExecContext(ctx, + _, err := s.writer.ExecContext(ctx, `INSERT OR IGNORE INTO namespaces (namespace) VALUES (?)`, ns[:]) if err != nil { return fmt.Errorf("insert namespace: %w", err) @@ -186,7 +221,7 @@ func (s *SQLiteStore) PutNamespace(ctx context.Context, ns types.Namespace) erro } func (s *SQLiteStore) GetNamespaces(ctx context.Context) ([]types.Namespace, error) { - rows, err := s.db.QueryContext(ctx, `SELECT namespace FROM namespaces`) + rows, err := s.reader.QueryContext(ctx, `SELECT namespace FROM namespaces`) if err != nil { return nil, fmt.Errorf("query namespaces: %w", err) } @@ -211,7 +246,7 @@ func (s *SQLiteStore) GetNamespaces(ctx context.Context) ([]types.Namespace, err func (s *SQLiteStore) GetSyncState(ctx context.Context) (*types.SyncStatus, error) { var state int var 
latestHeight, networkHeight uint64 - err := s.db.QueryRowContext(ctx, + err := s.reader.QueryRowContext(ctx, `SELECT state, latest_height, network_height FROM sync_state WHERE id = 1`). Scan(&state, &latestHeight, &networkHeight) if err != nil { @@ -228,7 +263,7 @@ func (s *SQLiteStore) GetSyncState(ctx context.Context) (*types.SyncStatus, erro } func (s *SQLiteStore) SetSyncState(ctx context.Context, status types.SyncStatus) error { - _, err := s.db.ExecContext(ctx, + _, err := s.writer.ExecContext(ctx, `INSERT INTO sync_state (id, state, latest_height, network_height, updated_at) VALUES (1, ?, ?, ?, ?) ON CONFLICT(id) DO UPDATE SET @@ -244,7 +279,7 @@ func (s *SQLiteStore) SetSyncState(ctx context.Context, status types.SyncStatus) } func (s *SQLiteStore) Close() error { - return s.db.Close() + return errors.Join(s.reader.Close(), s.writer.Close()) } // scanBlob scans a single blob from a *sql.Row. diff --git a/pkg/store/sqlite_test.go b/pkg/store/sqlite_test.go index 1c69872..3c3aa1f 100644 --- a/pkg/store/sqlite_test.go +++ b/pkg/store/sqlite_test.go @@ -102,7 +102,7 @@ func TestPutGetBlobs(t *testing.T) { } // GetBlobs range - all, err := s.GetBlobs(ctx, ns, 10, 11) + all, err := s.GetBlobs(ctx, ns, 10, 11, 0, 0) if err != nil { t.Fatalf("GetBlobs: %v", err) } @@ -149,7 +149,7 @@ func TestPutBlobsIdempotent(t *testing.T) { t.Fatalf("PutBlobs (second): %v", err) } - all, err := s.GetBlobs(ctx, ns, 10, 10) + all, err := s.GetBlobs(ctx, ns, 10, 10, 0, 0) if err != nil { t.Fatalf("GetBlobs: %v", err) } @@ -272,7 +272,7 @@ func TestGetBlobsEmptyRange(t *testing.T) { s := openTestDB(t) ctx := context.Background() - blobs, err := s.GetBlobs(ctx, testNamespace(1), 1, 100) + blobs, err := s.GetBlobs(ctx, testNamespace(1), 1, 100, 0, 0) if err != nil { t.Fatalf("GetBlobs: %v", err) } diff --git a/pkg/store/store.go b/pkg/store/store.go index d2ac126..14e9d1b 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -17,7 +17,8 @@ type Store interface { // GetBlobs 
returns blobs for the given namespace in the height range // [startHeight, endHeight] (both inclusive). - GetBlobs(ctx context.Context, ns types.Namespace, startHeight, endHeight uint64) ([]types.Blob, error) + // limit=0 means no limit; offset=0 means no offset. + GetBlobs(ctx context.Context, ns types.Namespace, startHeight, endHeight uint64, limit, offset int) ([]types.Blob, error) PutHeader(ctx context.Context, header *types.Header) error GetHeader(ctx context.Context, height uint64) (*types.Header, error) diff --git a/pkg/sync/backfill.go b/pkg/sync/backfill.go index 1138d5a..ae445fc 100644 --- a/pkg/sync/backfill.go +++ b/pkg/sync/backfill.go @@ -18,6 +18,7 @@ type Backfiller struct { fetcher fetch.DataFetcher batchSize int concurrency int + observer HeightObserver log zerolog.Logger } @@ -121,8 +122,9 @@ func (b *Backfiller) processHeight(ctx context.Context, height uint64, namespace return fmt.Errorf("put header: %w", err) } + var blobs []types.Blob if len(namespaces) > 0 { - blobs, err := b.fetcher.GetBlobs(ctx, height, namespaces) + blobs, err = b.fetcher.GetBlobs(ctx, height, namespaces) if err != nil { return fmt.Errorf("get blobs: %w", err) } @@ -133,5 +135,9 @@ func (b *Backfiller) processHeight(ctx context.Context, height uint64, namespace } } + if b.observer != nil { + b.observer(height, hdr, blobs) + } + return nil } diff --git a/pkg/sync/coordinator.go b/pkg/sync/coordinator.go index a36739c..76bdae6 100644 --- a/pkg/sync/coordinator.go +++ b/pkg/sync/coordinator.go @@ -16,6 +16,9 @@ import ( // ErrGapDetected is returned by SubscriptionManager when a height gap is found. var ErrGapDetected = errors.New("gap detected") +// HeightObserver is called after a height is successfully processed and stored. +type HeightObserver func(height uint64, header *types.Header, blobs []types.Blob) + // Coordinator manages the sync lifecycle between a data fetcher and a store. 
type Coordinator struct { store store.Store @@ -25,6 +28,7 @@ type Coordinator struct { batchSize int concurrency int startHeight uint64 + observer HeightObserver log zerolog.Logger } @@ -61,6 +65,11 @@ func WithLogger(log zerolog.Logger) Option { return func(c *Coordinator) { c.log = log } } +// WithObserver sets a callback invoked after each height is successfully stored. +func WithObserver(obs HeightObserver) Option { + return func(c *Coordinator) { c.observer = obs } +} + // New creates a Coordinator with the given store, fetcher, and options. func New(s store.Store, f fetch.DataFetcher, opts ...Option) *Coordinator { coord := &Coordinator{ @@ -114,6 +123,7 @@ func (c *Coordinator) Run(ctx context.Context) error { fetcher: c.fetcher, batchSize: c.batchSize, concurrency: c.concurrency, + observer: c.observer, log: c.log.With().Str("component", "backfiller").Logger(), } if err := bf.Run(ctx, fromHeight, networkHeight); err != nil { @@ -126,9 +136,10 @@ func (c *Coordinator) Run(ctx context.Context) error { c.log.Info().Msg("entering streaming mode") sm := &SubscriptionManager{ - store: c.store, - fetcher: c.fetcher, - log: c.log.With().Str("component", "subscription").Logger(), + store: c.store, + fetcher: c.fetcher, + observer: c.observer, + log: c.log.With().Str("component", "subscription").Logger(), } err = sm.Run(ctx) if errors.Is(err, ErrGapDetected) { diff --git a/pkg/sync/mock_test.go b/pkg/sync/mock_test.go index 35cc415..b857561 100644 --- a/pkg/sync/mock_test.go +++ b/pkg/sync/mock_test.go @@ -68,7 +68,7 @@ func (m *mockStore) GetBlob(_ context.Context, ns types.Namespace, height uint64 return nil, store.ErrNotFound } -func (m *mockStore) GetBlobs(_ context.Context, ns types.Namespace, startHeight, endHeight uint64) ([]types.Blob, error) { +func (m *mockStore) GetBlobs(_ context.Context, ns types.Namespace, startHeight, endHeight uint64, _, _ int) ([]types.Blob, error) { m.mu.Lock() defer m.mu.Unlock() var result []types.Blob diff --git 
a/pkg/sync/subscription.go b/pkg/sync/subscription.go index 84eda98..301e01d 100644 --- a/pkg/sync/subscription.go +++ b/pkg/sync/subscription.go @@ -14,9 +14,10 @@ import ( // SubscriptionManager processes new headers from a live subscription. type SubscriptionManager struct { - store store.Store - fetcher fetch.DataFetcher - log zerolog.Logger + store store.Store + fetcher fetch.DataFetcher + observer HeightObserver + log zerolog.Logger } // Run subscribes to new headers and processes them sequentially. @@ -80,8 +81,10 @@ func (sm *SubscriptionManager) processHeader(ctx context.Context, hdr *types.Hea return fmt.Errorf("put header: %w", err) } + var blobs []types.Blob if len(namespaces) > 0 { - blobs, err := sm.fetcher.GetBlobs(ctx, hdr.Height, namespaces) + var err error + blobs, err = sm.fetcher.GetBlobs(ctx, hdr.Height, namespaces) if err != nil { return fmt.Errorf("get blobs: %w", err) } @@ -100,6 +103,10 @@ func (sm *SubscriptionManager) processHeader(ctx context.Context, hdr *types.Hea return fmt.Errorf("set sync state: %w", err) } + if sm.observer != nil { + sm.observer(hdr.Height, hdr, blobs) + } + sm.log.Debug().Uint64("height", hdr.Height).Msg("processed header") return nil } diff --git a/proto/apex/v1/blob.proto b/proto/apex/v1/blob.proto new file mode 100644 index 0000000..8af81d3 --- /dev/null +++ b/proto/apex/v1/blob.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package apex.v1; + +option go_package = "github.com/evstack/apex/pkg/api/grpc/gen;gen"; + +import "apex/v1/types.proto"; + +// BlobService provides access to indexed blobs. +service BlobService { + // Get returns a single blob matching the namespace and commitment at the given height. + rpc Get(GetRequest) returns (GetResponse); + + // GetAll returns all blobs for the given namespaces at the given height. + rpc GetAll(GetAllRequest) returns (GetAllResponse); + + // Subscribe streams blob events for the given namespace. 
+ rpc Subscribe(BlobServiceSubscribeRequest) returns (stream BlobServiceSubscribeResponse); +} + +message GetRequest { + uint64 height = 1; + bytes namespace = 2; + bytes commitment = 3; +} + +message GetResponse { + Blob blob = 1; +} + +message GetAllRequest { + uint64 height = 1; + repeated bytes namespaces = 2; + // Pagination: maximum number of blobs to return. 0 means no limit. + int32 limit = 3; + // Pagination: number of blobs to skip. + int32 offset = 4; +} + +message GetAllResponse { + repeated Blob blobs = 1; +} + +message BlobServiceSubscribeRequest { + bytes namespace = 1; +} + +message BlobServiceSubscribeResponse { + uint64 height = 1; + repeated Blob blobs = 2; +} diff --git a/proto/apex/v1/header.proto b/proto/apex/v1/header.proto new file mode 100644 index 0000000..8772c79 --- /dev/null +++ b/proto/apex/v1/header.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package apex.v1; + +option go_package = "github.com/evstack/apex/pkg/api/grpc/gen;gen"; + +import "apex/v1/types.proto"; + +// HeaderService provides access to indexed headers. +service HeaderService { + // GetByHeight returns the header at the given height. + rpc GetByHeight(GetByHeightRequest) returns (GetByHeightResponse); + + // LocalHead returns the header at the latest synced height. + rpc LocalHead(LocalHeadRequest) returns (LocalHeadResponse); + + // NetworkHead returns the current network head from the upstream node. + rpc NetworkHead(NetworkHeadRequest) returns (NetworkHeadResponse); + + // Subscribe streams new headers as they are indexed. 
+ rpc Subscribe(HeaderServiceSubscribeRequest) returns (stream HeaderServiceSubscribeResponse); +} + +message GetByHeightRequest { + uint64 height = 1; +} + +message GetByHeightResponse { + Header header = 1; +} + +message LocalHeadRequest {} + +message LocalHeadResponse { + Header header = 1; +} + +message NetworkHeadRequest {} + +message NetworkHeadResponse { + Header header = 1; +} + +message HeaderServiceSubscribeRequest {} + +message HeaderServiceSubscribeResponse { + Header header = 1; +} diff --git a/proto/apex/v1/types.proto b/proto/apex/v1/types.proto new file mode 100644 index 0000000..f81b338 --- /dev/null +++ b/proto/apex/v1/types.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package apex.v1; + +option go_package = "github.com/evstack/apex/pkg/api/grpc/gen;gen"; + +import "google/protobuf/timestamp.proto"; + +// Blob represents a blob submitted to a Celestia namespace. +message Blob { + uint64 height = 1; + bytes namespace = 2; + bytes data = 3; + bytes commitment = 4; + uint32 share_version = 5; + bytes signer = 6; + int32 index = 7; +} + +// Header represents a Celestia block header. +message Header { + uint64 height = 1; + bytes hash = 2; + bytes data_hash = 3; + google.protobuf.Timestamp time = 4; + bytes raw_header = 5; +}