diff --git a/.github/workflows/test-e2e-multi_pp.yml b/.github/workflows/test-e2e-multi_pp.yml deleted file mode 100644 index f1e35e9c2..000000000 --- a/.github/workflows/test-e2e-multi_pp.yml +++ /dev/null @@ -1,75 +0,0 @@ -# based on: https://github.com/0xPolygon/kurtosis-cdk/tree/main/docs/multi-pp-testing -name: Test e2e multi pp -on: - push: - branches: - - '**' - workflow_dispatch: {} - -jobs: - test-e2e-multi_pp: - strategy: - fail-fast: false - matrix: - go-version: [ 1.22.x ] - goarch: [ "amd64" ] - e2e-group: - - "fork12-pessimistic" - runs-on: ubuntu-latest - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Install Go - uses: actions/setup-go@v5 - with: - go-version: ${{ matrix.go-version }} - env: - GOARCH: ${{ matrix.goarch }} - - - name: Build Docker - run: make build-docker - - - name: Build Tools - run: make build-tools - - - name: Checkout Kurtosis CDK - uses: actions/checkout@v4 - with: - repository: 0xPolygon/kurtosis-cdk - path: kurtosis-cdk - ref: v0.2.26 - - - name: Install Kurtosis CDK tools - uses: ./kurtosis-cdk/.github/actions/setup-kurtosis-cdk - - - name: Setup Bats and bats libs - uses: bats-core/bats-action@2.0.0 - - - name: Test - run: make test-e2e-fork12-multi-pessimistic - - working-directory: test - env: - KURTOSIS_FOLDER: ${{ github.workspace }}/kurtosis-cdk - BATS_LIB_PATH: /usr/lib/ - agglayer_prover_sp1_key: ${{ secrets.SP1_PRIVATE_KEY }} - - - name: Dump enclave logs - if: failure() - run: kurtosis dump ./dump - - - name: Generate archive name - if: failure() - run: | - archive_name="dump_run_with_args_${{matrix.e2e-group}}_${{ github.run_id }}" - echo "ARCHIVE_NAME=${archive_name}" >> "$GITHUB_ENV" - echo "Generated archive name: ${archive_name}" - kurtosis service exec cdk cdk-node-001 'cat /etc/cdk/cdk-node-config.toml' > ./dump/cdk-node-config.toml - - - name: Upload logs - if: failure() - uses: actions/upload-artifact@v4 - with: - name: ${{ env.ARCHIVE_NAME }} - path: ./dump diff --git 
a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index a31f7d4f5..11c46489e 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -42,7 +42,6 @@ jobs: - "fork11-rollup" - "fork12-validium" - "fork12-rollup" - - "fork12-pessimistic" steps: - name: Checkout Code uses: actions/checkout@v4 diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 000000000..7fccd0c8e --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,75 @@ +with-expecter: true +issue-845-fix: true +resolve-type-alias: false +dir: "{{ .InterfaceDir }}/../mocks" +outpkg: "mocks" +filename: "mock_{{ .InterfaceName | snakecase | lower }}.go" +mockname: "{{ .InterfaceName }}" +packages: + github.com/0xPolygon/cdk/agglayer: + config: + inpackage: true + dir: "{{ .InterfaceDir }}" + outpkg: "{{ .PackageName }}" + interfaces: + AgglayerClientInterface: + config: + mockname: AgglayerClientMock + github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer: + config: + interfaces: + Synchronizer: + config: + dir: "./aggregator/mocks" + mockname: "{{ .InterfaceName }}InterfaceMock" + github.com/0xPolygon/cdk/aggregator: + config: + dir: "{{ .InterfaceDir }}/mocks" + mockname: "{{ .InterfaceName }}Mock" + interfaces: + ProverInterface: + Etherman: + StorageInterface: + EthTxManagerClient: + RPCInterface: + github.com/0xPolygon/cdk/aggregator/prover: + config: + dir: "{{ .InterfaceDir }}/mocks" + interfaces: + AggregatorService_ChannelServer: + config: + mockname: ChannelMock + github.com/0xPolygon/cdk/dataavailability: + config: + dir: "{{ .InterfaceDir }}/mocks_da" + all: true + outpkg: "mocks_da" + github.com/0xPolygon/cdk/sequencesender: + config: + dir: "{{ .InterfaceDir }}/mocks" + outpkg: "mocks" + mockname: "{{ .InterfaceName }}Mock" + interfaces: + EthTxManager: + configs: + - mockname: EthTxManagerMock + - mockname: EthTxManagerMock + dir: "{{ .InterfaceDir }}/../test/helpers" + outpkg: "helpers" + Etherman: + RPCInterface: + 
github.com/0xPolygon/cdk/sequencesender/txbuilder: + config: + dir: "{{ .InterfaceDir }}/mocks_txbuilder" + all: true + outpkg: "mocks_txbuilder" + mockname: "{{ .InterfaceName | camelcase | firstUpper }}" + interfaces: + TxBuilder: + configs: + - dir: "{{ .InterfaceDir }}/../mocks" + outpkg: "mocks" + mockname: "{{ .InterfaceName | camelcase | firstUpper }}Mock" + - dir: "{{ .InterfaceDir }}/mocks_txbuilder" + outpkg: "mocks_txbuilder" + mockname: "{{ .InterfaceName | camelcase | firstUpper }}" diff --git a/Dockerfile b/Dockerfile index fe0cf4335..2c3fbcbfc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ RUN go mod download # BUILD BINARY COPY . . -RUN make build-go build-tools +RUN make build-go # BUILD RUST BIN FROM --platform=${BUILDPLATFORM} rust:slim-bookworm AS chef diff --git a/Makefile b/Makefile index fad3c3950..1a7d76c24 100644 --- a/Makefile +++ b/Makefile @@ -36,12 +36,12 @@ check-docker: check-docker-compose: @which docker-compose > /dev/null || (echo "Error: docker-compose is not installed" && exit 1) -# Check for Protoc +# Check for protoc .PHONY: check-protoc check-protoc: - @which protoc > /dev/null || (echo "Error: Protoc is not installed" && exit 1) + @which protoc > /dev/null || (echo "Error: protoc is not installed" && exit 1) -# Check for Curl +# Check for curl .PHONY: check-curl check-curl: @which curl > /dev/null || (echo "Error: curl is not installed" && exit 1) @@ -56,20 +56,16 @@ install-linter: check-go check-curl generate-code-from-proto: check-protoc .PHONY: build -build: build-rust build-go build-tools## Builds the binaries locally into ./target +build: build-rust build-go ## Builds the binaries locally into ./target .PHONY: build-rust build-rust: - export BUILD_SCRIPT_DISABLED=1 && cargo build --release + BUILD_SCRIPT_DISABLED=1 cargo build --release --jobs=$(shell nproc) .PHONY: build-go build-go: $(GOENVVARS) go build -ldflags "all=$(LDFLAGS)" -o $(GOBIN)/$(GOBINARY) $(GOCMD) -.PHONY: build-tools -build-tools: ## Builds 
the tools - $(GOENVVARS) go build -o $(GOBIN)/aggsender_find_imported_bridge ./tools/aggsender_find_imported_bridge - .PHONY: build-docker build-docker: ## Builds a docker image with the cdk binary docker build -t cdk -f ./Dockerfile . diff --git a/agglayer/client.go b/agglayer/client.go index 8a186be46..01453165a 100644 --- a/agglayer/client.go +++ b/agglayer/client.go @@ -20,18 +20,10 @@ var ( jSONRPCCall = rpc.JSONRPCCall ) -type AggLayerClientGetEpochConfiguration interface { - GetEpochConfiguration() (*ClockConfiguration, error) -} - // AgglayerClientInterface is the interface that defines the methods that the AggLayerClient will implement type AgglayerClientInterface interface { SendTx(signedTx SignedTx) (common.Hash, error) WaitTxToBeMined(hash common.Hash, ctx context.Context) error - SendCertificate(certificate *SignedCertificate) (common.Hash, error) - GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) - GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) - AggLayerClientGetEpochConfiguration } // AggLayerClient is the client that will be used to interact with the AggLayer @@ -97,86 +89,3 @@ func (c *AggLayerClient) WaitTxToBeMined(hash common.Hash, ctx context.Context) } } } - -// SendCertificate sends a certificate to the AggLayer -func (c *AggLayerClient) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { - certificateToSend := certificate.CopyWithDefaulting() - - response, err := rpc.JSONRPCCall(c.url, "interop_sendCertificate", certificateToSend) - if err != nil { - return common.Hash{}, err - } - - if response.Error != nil { - return common.Hash{}, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) - } - - var result types.ArgHash - err = json.Unmarshal(response.Result, &result) - if err != nil { - return common.Hash{}, err - } - - return result.Hash(), nil -} - -// GetCertificateHeader returns the certificate header associated to the hash -func (c 
*AggLayerClient) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { - response, err := rpc.JSONRPCCall(c.url, "interop_getCertificateHeader", certificateHash) - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, fmt.Errorf("%d %s", response.Error.Code, response.Error.Message) - } - - var result *CertificateHeader - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// GetEpochConfiguration returns the clock configuration of AggLayer -func (c *AggLayerClient) GetEpochConfiguration() (*ClockConfiguration, error) { - response, err := jSONRPCCall(c.url, "interop_getEpochConfiguration") - if err != nil { - return nil, err - } - - if response.Error != nil { - return nil, fmt.Errorf("GetEpochConfiguration code=%d msg=%s", response.Error.Code, response.Error.Message) - } - - var result *ClockConfiguration - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// GetLatestKnownCertificateHeader returns the last certificate header submitted by networkID -func (c *AggLayerClient) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { - response, err := jSONRPCCall(c.url, "interop_getLatestKnownCertificateHeader", networkID) - if err != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader error jSONRPCCall. Err: %w", err) - } - - if response.Error != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader rpc returns an error: code=%d msg=%s", - response.Error.Code, response.Error.Message) - } - - var result *CertificateHeader - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, fmt.Errorf("GetLatestKnownCertificateHeader error Unmashal. 
Err: %w", err) - } - - return result, nil -} diff --git a/agglayer/client_test.go b/agglayer/client_test.go deleted file mode 100644 index 91ec98c50..000000000 --- a/agglayer/client_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package agglayer - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -const ( - testURL = "http://localhost:8080" -) - -func TestExploratoryClient(t *testing.T) { - t.Skip("This test is for exploratory purposes only") - sut := NewAggLayerClient("http://127.0.0.1:32781") - config, err := sut.GetEpochConfiguration() - require.NoError(t, err) - require.NotNil(t, config) - fmt.Printf("Config: %s", config.String()) - - lastCert, err := sut.GetLatestKnownCertificateHeader(1) - require.NoError(t, err) - require.NotNil(t, lastCert) - fmt.Printf("LastCert: %s", lastCert.String()) -} - -func TestExploratoryGetCertificateHeader(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32796") - certificateID := common.HexToHash("0xf153e75e24591432ac5deafaeaafba3fec0fd851261c86051b9c0d540b38c369") - certificateHeader, err := aggLayerClient.GetCertificateHeader(certificateID) - require.NoError(t, err) - fmt.Print(certificateHeader) -} -func TestExploratoryGetEpochConfiguration(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32796") - clockConfig, err := aggLayerClient.GetEpochConfiguration() - require.NoError(t, err) - fmt.Print(clockConfig) -} - -func TestExploratoryGetLatestKnownCertificateHeader(t *testing.T) { - t.Skip("This test is exploratory and should be skipped") - aggLayerClient := NewAggLayerClient("http://localhost:32843") - cert, err := aggLayerClient.GetLatestKnownCertificateHeader(1) - require.NoError(t, err) - fmt.Print(cert) -} - -func TestGetEpochConfigurationResponseWithError(t 
*testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Error: &rpc.ErrorObject{}, - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationResponseBadJson(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationErrorResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return rpc.Response{}, fmt.Errorf("unittest error") - } - clockConfig, err := sut.GetEpochConfiguration() - require.Nil(t, clockConfig) - require.Error(t, err) -} - -func TestGetEpochConfigurationOkResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{"epoch_duration": 1, "genesis_block": 1}`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - clockConfig, err := sut.GetEpochConfiguration() - require.NotNil(t, clockConfig) - require.NoError(t, err) - require.Equal(t, ClockConfiguration{ - EpochDuration: 1, - GenesisBlock: 1, - }, *clockConfig) -} - -func TestGetLatestKnownCertificateHeaderOkResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: 
[]byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetLatestKnownCertificateHeader(1) - require.NotNil(t, cert) - require.NoError(t, err) - require.Nil(t, cert.PreviousLocalExitRoot) -} - -func TestGetLatestKnownCertificateHeaderErrorResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return rpc.Response{}, fmt.Errorf("unittest error") - } - - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.Nil(t, cert) - require.Error(t, err) -} - -func TestGetLatestKnownCertificateHeaderResponseBadJson(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{`), - } - jSONRPCCall = func(url, method string, params ...interface{}) (rpc.Response, error) { - return response, nil - } - - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.Nil(t, cert) - require.Error(t, err) -} - -func TestGetLatestKnownCertificateHeaderWithPrevLERResponse(t *testing.T) { - sut := NewAggLayerClient(testURL) - response := rpc.Response{ - Result: []byte(`{"network_id":1,"height":0,"epoch_number":223,"certificate_index":0,"certificate_id":"0xf9179d2fbe535814b5a14496e2eed474f49c6131227a9dfc5d2d8caf9e212054","prev_local_exit_root":"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757","new_local_exit_root":"0x7ae06f4a5d0b6da7dd4973fb6ef40d82c9f2680899b3baaf9e564413b59cc160","metadata":"0x00000000000000000000000000000000000000000000000000000000000001a7","status":"Settled"}`), - } - jSONRPCCall 
= func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetLatestKnownCertificateHeader(1) - - require.NoError(t, err) - require.NotNil(t, cert) - - require.Equal(t, "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", cert.PreviousLocalExitRoot.String()) -} diff --git a/agglayer/errors_test.go b/agglayer/errors_test.go deleted file mode 100644 index 142930263..000000000 --- a/agglayer/errors_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package agglayer - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConvertMapValue_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want string - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", - want: "value1", - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": 1, - }, - key: "key1", - want: "", - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key2", - want: "", - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[string](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -//nolint:dupl -func TestConvertMapValue_Uint32(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want uint32 - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": uint32(123), - }, - key: "key1", - want: uint32(123), - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", 
- want: 0, - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": uint32(123), - }, - key: "key2", - want: 0, - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[uint32](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -//nolint:dupl -func TestConvertMapValue_Uint64(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want uint64 - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": uint64(3411), - }, - key: "key1", - want: uint64(3411), - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "not a number", - }, - key: "key1", - want: 0, - errString: "is not of type", - }, - { - name: "Key does not exist", - data: map[string]interface{}{ - "key1": uint64(123555), - }, - key: "key22", - want: 0, - errString: "key key22 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[uint64](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} - -func TestConvertMapValue_Bool(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - key string - want bool - errString string - }{ - { - name: "Key exists and type matches", - data: map[string]interface{}{ - "key1": true, - }, - key: "key1", - want: true, - }, - { - name: "Key exists but type does not match", - data: map[string]interface{}{ - "key1": "value1", - }, - key: "key1", - want: false, - errString: "is not of type", - }, - { - name: "Key does not 
exist", - data: map[string]interface{}{ - "key1": true, - }, - key: "key2", - want: false, - errString: "key key2 not found in map", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - got, err := convertMapValue[bool](tt.data, tt.key) - if tt.errString != "" { - require.ErrorContains(t, err, tt.errString) - } else { - require.Equal(t, tt.want, got) - } - }) - } -} diff --git a/agglayer/mock_agglayer_client.go b/agglayer/mock_agglayer_client.go deleted file mode 100644 index 8b8c86899..000000000 --- a/agglayer/mock_agglayer_client.go +++ /dev/null @@ -1,374 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package agglayer - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - mock "github.com/stretchr/testify/mock" -) - -// AgglayerClientMock is an autogenerated mock type for the AgglayerClientInterface type -type AgglayerClientMock struct { - mock.Mock -} - -type AgglayerClientMock_Expecter struct { - mock *mock.Mock -} - -func (_m *AgglayerClientMock) EXPECT() *AgglayerClientMock_Expecter { - return &AgglayerClientMock_Expecter{mock: &_m.Mock} -} - -// GetCertificateHeader provides a mock function with given fields: certificateHash -func (_m *AgglayerClientMock) GetCertificateHeader(certificateHash common.Hash) (*CertificateHeader, error) { - ret := _m.Called(certificateHash) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateHeader") - } - - var r0 *CertificateHeader - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*CertificateHeader, error)); ok { - return rf(certificateHash) - } - if rf, ok := ret.Get(0).(func(common.Hash) *CertificateHeader); ok { - r0 = rf(certificateHash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*CertificateHeader) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(certificateHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
AgglayerClientMock_GetCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateHeader' -type AgglayerClientMock_GetCertificateHeader_Call struct { - *mock.Call -} - -// GetCertificateHeader is a helper method to define mock.On call -// - certificateHash common.Hash -func (_e *AgglayerClientMock_Expecter) GetCertificateHeader(certificateHash interface{}) *AgglayerClientMock_GetCertificateHeader_Call { - return &AgglayerClientMock_GetCertificateHeader_Call{Call: _e.mock.On("GetCertificateHeader", certificateHash)} -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) Run(run func(certificateHash common.Hash)) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetCertificateHeader_Call) RunAndReturn(run func(common.Hash) (*CertificateHeader, error)) *AgglayerClientMock_GetCertificateHeader_Call { - _c.Call.Return(run) - return _c -} - -// GetEpochConfiguration provides a mock function with no fields -func (_m *AgglayerClientMock) GetEpochConfiguration() (*ClockConfiguration, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetEpochConfiguration") - } - - var r0 *ClockConfiguration - var r1 error - if rf, ok := ret.Get(0).(func() (*ClockConfiguration, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *ClockConfiguration); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*ClockConfiguration) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_GetEpochConfiguration_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'GetEpochConfiguration' -type AgglayerClientMock_GetEpochConfiguration_Call struct { - *mock.Call -} - -// GetEpochConfiguration is a helper method to define mock.On call -func (_e *AgglayerClientMock_Expecter) GetEpochConfiguration() *AgglayerClientMock_GetEpochConfiguration_Call { - return &AgglayerClientMock_GetEpochConfiguration_Call{Call: _e.mock.On("GetEpochConfiguration")} -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Run(run func()) *AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) Return(_a0 *ClockConfiguration, _a1 error) *AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetEpochConfiguration_Call) RunAndReturn(run func() (*ClockConfiguration, error)) *AgglayerClientMock_GetEpochConfiguration_Call { - _c.Call.Return(run) - return _c -} - -// GetLatestKnownCertificateHeader provides a mock function with given fields: networkID -func (_m *AgglayerClientMock) GetLatestKnownCertificateHeader(networkID uint32) (*CertificateHeader, error) { - ret := _m.Called(networkID) - - if len(ret) == 0 { - panic("no return value specified for GetLatestKnownCertificateHeader") - } - - var r0 *CertificateHeader - var r1 error - if rf, ok := ret.Get(0).(func(uint32) (*CertificateHeader, error)); ok { - return rf(networkID) - } - if rf, ok := ret.Get(0).(func(uint32) *CertificateHeader); ok { - r0 = rf(networkID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*CertificateHeader) - } - } - - if rf, ok := ret.Get(1).(func(uint32) error); ok { - r1 = rf(networkID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_GetLatestKnownCertificateHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLatestKnownCertificateHeader' -type 
AgglayerClientMock_GetLatestKnownCertificateHeader_Call struct { - *mock.Call -} - -// GetLatestKnownCertificateHeader is a helper method to define mock.On call -// - networkID uint32 -func (_e *AgglayerClientMock_Expecter) GetLatestKnownCertificateHeader(networkID interface{}) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - return &AgglayerClientMock_GetLatestKnownCertificateHeader_Call{Call: _e.mock.On("GetLatestKnownCertificateHeader", networkID)} -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Run(run func(networkID uint32)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32)) - }) - return _c -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) Return(_a0 *CertificateHeader, _a1 error) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_GetLatestKnownCertificateHeader_Call) RunAndReturn(run func(uint32) (*CertificateHeader, error)) *AgglayerClientMock_GetLatestKnownCertificateHeader_Call { - _c.Call.Return(run) - return _c -} - -// SendCertificate provides a mock function with given fields: certificate -func (_m *AgglayerClientMock) SendCertificate(certificate *SignedCertificate) (common.Hash, error) { - ret := _m.Called(certificate) - - if len(ret) == 0 { - panic("no return value specified for SendCertificate") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(*SignedCertificate) (common.Hash, error)); ok { - return rf(certificate) - } - if rf, ok := ret.Get(0).(func(*SignedCertificate) common.Hash); ok { - r0 = rf(certificate) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(*SignedCertificate) error); ok { - r1 = rf(certificate) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_SendCertificate_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'SendCertificate' -type AgglayerClientMock_SendCertificate_Call struct { - *mock.Call -} - -// SendCertificate is a helper method to define mock.On call -// - certificate *SignedCertificate -func (_e *AgglayerClientMock_Expecter) SendCertificate(certificate interface{}) *AgglayerClientMock_SendCertificate_Call { - return &AgglayerClientMock_SendCertificate_Call{Call: _e.mock.On("SendCertificate", certificate)} -} - -func (_c *AgglayerClientMock_SendCertificate_Call) Run(run func(certificate *SignedCertificate)) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*SignedCertificate)) - }) - return _c -} - -func (_c *AgglayerClientMock_SendCertificate_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_SendCertificate_Call) RunAndReturn(run func(*SignedCertificate) (common.Hash, error)) *AgglayerClientMock_SendCertificate_Call { - _c.Call.Return(run) - return _c -} - -// SendTx provides a mock function with given fields: signedTx -func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { - ret := _m.Called(signedTx) - - if len(ret) == 0 { - panic("no return value specified for SendTx") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(SignedTx) (common.Hash, error)); ok { - return rf(signedTx) - } - if rf, ok := ret.Get(0).(func(SignedTx) common.Hash); ok { - r0 = rf(signedTx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(SignedTx) error); ok { - r1 = rf(signedTx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AgglayerClientMock_SendTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTx' -type AgglayerClientMock_SendTx_Call struct { - *mock.Call -} - -// SendTx is a helper method to define 
mock.On call -// - signedTx SignedTx -func (_e *AgglayerClientMock_Expecter) SendTx(signedTx interface{}) *AgglayerClientMock_SendTx_Call { - return &AgglayerClientMock_SendTx_Call{Call: _e.mock.On("SendTx", signedTx)} -} - -func (_c *AgglayerClientMock_SendTx_Call) Run(run func(signedTx SignedTx)) *AgglayerClientMock_SendTx_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(SignedTx)) - }) - return _c -} - -func (_c *AgglayerClientMock_SendTx_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendTx_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AgglayerClientMock_SendTx_Call) RunAndReturn(run func(SignedTx) (common.Hash, error)) *AgglayerClientMock_SendTx_Call { - _c.Call.Return(run) - return _c -} - -// WaitTxToBeMined provides a mock function with given fields: hash, ctx -func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { - ret := _m.Called(hash, ctx) - - if len(ret) == 0 { - panic("no return value specified for WaitTxToBeMined") - } - - var r0 error - if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { - r0 = rf(hash, ctx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AgglayerClientMock_WaitTxToBeMined_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitTxToBeMined' -type AgglayerClientMock_WaitTxToBeMined_Call struct { - *mock.Call -} - -// WaitTxToBeMined is a helper method to define mock.On call -// - hash common.Hash -// - ctx context.Context -func (_e *AgglayerClientMock_Expecter) WaitTxToBeMined(hash interface{}, ctx interface{}) *AgglayerClientMock_WaitTxToBeMined_Call { - return &AgglayerClientMock_WaitTxToBeMined_Call{Call: _e.mock.On("WaitTxToBeMined", hash, ctx)} -} - -func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Run(run func(hash common.Hash, ctx context.Context)) *AgglayerClientMock_WaitTxToBeMined_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash), 
args[1].(context.Context)) - }) - return _c -} - -func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Return(_a0 error) *AgglayerClientMock_WaitTxToBeMined_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AgglayerClientMock_WaitTxToBeMined_Call) RunAndReturn(run func(common.Hash, context.Context) error) *AgglayerClientMock_WaitTxToBeMined_Call { - _c.Call.Return(run) - return _c -} - -// NewAgglayerClientMock creates a new instance of AgglayerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAgglayerClientMock(t interface { - mock.TestingT - Cleanup(func()) -}) *AgglayerClientMock { - mock := &AgglayerClientMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/agglayer/mock_agglayer_client_interface.go b/agglayer/mock_agglayer_client_interface.go new file mode 100644 index 000000000..ba870086b --- /dev/null +++ b/agglayer/mock_agglayer_client_interface.go @@ -0,0 +1,143 @@ +// Code generated by mockery v2.52.2. DO NOT EDIT. 
+ +package agglayer + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" +) + +// AgglayerClientMock is an autogenerated mock type for the AgglayerClientInterface type +type AgglayerClientMock struct { + mock.Mock +} + +type AgglayerClientMock_Expecter struct { + mock *mock.Mock +} + +func (_m *AgglayerClientMock) EXPECT() *AgglayerClientMock_Expecter { + return &AgglayerClientMock_Expecter{mock: &_m.Mock} +} + +// SendTx provides a mock function with given fields: signedTx +func (_m *AgglayerClientMock) SendTx(signedTx SignedTx) (common.Hash, error) { + ret := _m.Called(signedTx) + + if len(ret) == 0 { + panic("no return value specified for SendTx") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(SignedTx) (common.Hash, error)); ok { + return rf(signedTx) + } + if rf, ok := ret.Get(0).(func(SignedTx) common.Hash); ok { + r0 = rf(signedTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(SignedTx) error); ok { + r1 = rf(signedTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// AgglayerClientMock_SendTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTx' +type AgglayerClientMock_SendTx_Call struct { + *mock.Call +} + +// SendTx is a helper method to define mock.On call +// - signedTx SignedTx +func (_e *AgglayerClientMock_Expecter) SendTx(signedTx interface{}) *AgglayerClientMock_SendTx_Call { + return &AgglayerClientMock_SendTx_Call{Call: _e.mock.On("SendTx", signedTx)} +} + +func (_c *AgglayerClientMock_SendTx_Call) Run(run func(signedTx SignedTx)) *AgglayerClientMock_SendTx_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(SignedTx)) + }) + return _c +} + +func (_c *AgglayerClientMock_SendTx_Call) Return(_a0 common.Hash, _a1 error) *AgglayerClientMock_SendTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*AgglayerClientMock_SendTx_Call) RunAndReturn(run func(SignedTx) (common.Hash, error)) *AgglayerClientMock_SendTx_Call { + _c.Call.Return(run) + return _c +} + +// WaitTxToBeMined provides a mock function with given fields: hash, ctx +func (_m *AgglayerClientMock) WaitTxToBeMined(hash common.Hash, ctx context.Context) error { + ret := _m.Called(hash, ctx) + + if len(ret) == 0 { + panic("no return value specified for WaitTxToBeMined") + } + + var r0 error + if rf, ok := ret.Get(0).(func(common.Hash, context.Context) error); ok { + r0 = rf(hash, ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AgglayerClientMock_WaitTxToBeMined_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitTxToBeMined' +type AgglayerClientMock_WaitTxToBeMined_Call struct { + *mock.Call +} + +// WaitTxToBeMined is a helper method to define mock.On call +// - hash common.Hash +// - ctx context.Context +func (_e *AgglayerClientMock_Expecter) WaitTxToBeMined(hash interface{}, ctx interface{}) *AgglayerClientMock_WaitTxToBeMined_Call { + return &AgglayerClientMock_WaitTxToBeMined_Call{Call: _e.mock.On("WaitTxToBeMined", hash, ctx)} +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Run(run func(hash common.Hash, ctx context.Context)) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(common.Hash), args[1].(context.Context)) + }) + return _c +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) Return(_a0 error) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *AgglayerClientMock_WaitTxToBeMined_Call) RunAndReturn(run func(common.Hash, context.Context) error) *AgglayerClientMock_WaitTxToBeMined_Call { + _c.Call.Return(run) + return _c +} + +// NewAgglayerClientMock creates a new instance of AgglayerClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewAgglayerClientMock(t interface { + mock.TestingT + Cleanup(func()) +}) *AgglayerClientMock { + mock := &AgglayerClientMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/agglayer/types.go b/agglayer/types.go deleted file mode 100644 index de1a0063d..000000000 --- a/agglayer/types.go +++ /dev/null @@ -1,897 +0,0 @@ -package agglayer - -import ( - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "slices" - "strings" - - "github.com/0xPolygon/cdk/bridgesync" - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -type CertificateStatus int - -const ( - Pending CertificateStatus = iota - Proven - Candidate - InError - Settled - - nilStr = "nil" - nullStr = "null" - base10 = 10 -) - -var ( - NonSettledStatuses = []CertificateStatus{Pending, Candidate, Proven} - ClosedStatuses = []CertificateStatus{Settled, InError} -) - -// String representation of the enum -func (c CertificateStatus) String() string { - return [...]string{"Pending", "Proven", "Candidate", "InError", "Settled"}[c] -} - -// IsClosed returns true if the certificate is closed (settled or inError) -func (c CertificateStatus) IsClosed() bool { - return !c.IsOpen() -} - -// IsSettled returns true if the certificate is settled -func (c CertificateStatus) IsSettled() bool { - return c == Settled -} - -// IsInError returns true if the certificate is in error -func (c CertificateStatus) IsInError() bool { - return c == InError -} - -// IsOpen returns true if the certificate is open (pending, candidate or proven) -func (c CertificateStatus) IsOpen() bool { - return slices.Contains(NonSettledStatuses, c) -} - -// UnmarshalJSON is the implementation of the json.Unmarshaler interface -func (c *CertificateStatus) UnmarshalJSON(rawStatus []byte) error { - status 
:= strings.Trim(string(rawStatus), "\"") - if strings.Contains(status, "InError") { - status = "InError" - } - - switch status { - case "Pending": - *c = Pending - case "InError": - *c = InError - case "Proven": - *c = Proven - case "Candidate": - *c = Candidate - case "Settled": - *c = Settled - default: - // Maybe the status is numeric: - var statusInt int - if _, err := fmt.Sscanf(status, "%d", &statusInt); err == nil { - *c = CertificateStatus(statusInt) - } else { - return fmt.Errorf("invalid status: %s", status) - } - } - - return nil -} - -type LeafType uint8 - -func (l LeafType) Uint8() uint8 { - return uint8(l) -} - -func (l LeafType) String() string { - return [...]string{"Transfer", "Message"}[l] -} - -func (l *LeafType) UnmarshalJSON(raw []byte) error { - rawStr := strings.Trim(string(raw), "\"") - switch rawStr { - case "Transfer": - *l = LeafTypeAsset - case "Message": - *l = LeafTypeMessage - default: - var value int - if _, err := fmt.Sscanf(rawStr, "%d", &value); err != nil { - return fmt.Errorf("invalid LeafType: %s", rawStr) - } - *l = LeafType(value) - } - return nil -} - -const ( - LeafTypeAsset LeafType = iota - LeafTypeMessage -) - -// Certificate is the data structure that will be sent to the agglayer -type Certificate struct { - NetworkID uint32 `json:"network_id"` - Height uint64 `json:"height"` - PrevLocalExitRoot common.Hash `json:"prev_local_exit_root"` - NewLocalExitRoot common.Hash `json:"new_local_exit_root"` - BridgeExits []*BridgeExit `json:"bridge_exits"` - ImportedBridgeExits []*ImportedBridgeExit `json:"imported_bridge_exits"` - Metadata common.Hash `json:"metadata"` -} - -// Brief returns a string with a brief cert -func (c *Certificate) Brief() string { - if c == nil { - return nilStr - } - res := fmt.Sprintf("agglayer.Cert {height: %d prevLER: %s newLER: %s exits: %d imported_exits: %d}", c.Height, - c.PrevLocalExitRoot.String(), c.NewLocalExitRoot.String(), - len(c.BridgeExits), len(c.ImportedBridgeExits)) - return res -} - 
-// Hash returns a hash that uniquely identifies the certificate -func (c *Certificate) Hash() common.Hash { - bridgeExitsHashes := make([][]byte, len(c.BridgeExits)) - for i, bridgeExit := range c.BridgeExits { - bridgeExitsHashes[i] = bridgeExit.Hash().Bytes() - } - - importedBridgeExitsHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - importedBridgeExitsHashes[i] = importedBridgeExit.Hash().Bytes() - } - - bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) - importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) - - return crypto.Keccak256Hash( - cdkcommon.Uint32ToBytes(c.NetworkID), - cdkcommon.Uint64ToBytes(c.Height), - c.PrevLocalExitRoot.Bytes(), - c.NewLocalExitRoot.Bytes(), - bridgeExitsPart, - importedBridgeExitsPart, - ) -} - -// HashToSign is the actual hash that needs to be signed by the aggsender -// as expected by the agglayer -func (c *Certificate) HashToSign() common.Hash { - globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() - } - - return crypto.Keccak256Hash( - c.NewLocalExitRoot.Bytes(), - crypto.Keccak256Hash(globalIndexHashes...).Bytes(), - ) -} - -// SignedCertificate is the struct that contains the certificate and the signature of the signer -type SignedCertificate struct { - *Certificate - Signature *Signature `json:"signature"` -} - -func (s *SignedCertificate) Brief() string { - return fmt.Sprintf("Certificate:%s,\nSignature: %s", s.Certificate.Brief(), s.Signature.String()) -} - -// CopyWithDefaulting returns a shallow copy of the signed certificate -func (s *SignedCertificate) CopyWithDefaulting() *SignedCertificate { - certificateCopy := *s.Certificate - - if certificateCopy.BridgeExits == nil { - certificateCopy.BridgeExits = make([]*BridgeExit, 0) - } - - if certificateCopy.ImportedBridgeExits == nil 
{ - certificateCopy.ImportedBridgeExits = make([]*ImportedBridgeExit, 0) - } - - signature := s.Signature - if signature == nil { - signature = &Signature{} - } - - return &SignedCertificate{ - Certificate: &certificateCopy, - Signature: signature, - } -} - -// Signature is the data structure that will hold the signature of the given certificate -type Signature struct { - R common.Hash `json:"r"` - S common.Hash `json:"s"` - OddParity bool `json:"odd_y_parity"` -} - -func (s *Signature) String() string { - return fmt.Sprintf("R: %s, S: %s, OddParity: %t", s.R.String(), s.S.String(), s.OddParity) -} - -// TokenInfo encapsulates the information to uniquely identify a token on the origin network. -type TokenInfo struct { - OriginNetwork uint32 `json:"origin_network"` - OriginTokenAddress common.Address `json:"origin_token_address"` -} - -// String returns a string representation of the TokenInfo struct -func (t *TokenInfo) String() string { - return fmt.Sprintf("OriginNetwork: %d, OriginTokenAddress: %s", t.OriginNetwork, t.OriginTokenAddress.String()) -} - -// GlobalIndex represents the global index of an imported bridge exit -type GlobalIndex struct { - MainnetFlag bool `json:"mainnet_flag"` - RollupIndex uint32 `json:"rollup_index"` - LeafIndex uint32 `json:"leaf_index"` -} - -// String returns a string representation of the GlobalIndex struct -func (g *GlobalIndex) String() string { - return fmt.Sprintf("MainnetFlag: %t, RollupIndex: %d, LeafIndex: %d", g.MainnetFlag, g.RollupIndex, g.LeafIndex) -} - -func (g *GlobalIndex) Hash() common.Hash { - return crypto.Keccak256Hash( - cdkcommon.BigIntToLittleEndianBytes( - bridgesync.GenerateGlobalIndex(g.MainnetFlag, g.RollupIndex, g.LeafIndex), - ), - ) -} - -func (g *GlobalIndex) UnmarshalFromMap(data map[string]interface{}) error { - rollupIndex, err := convertMapValue[uint32](data, "rollup_index") - if err != nil { - return err - } - - leafIndex, err := convertMapValue[uint32](data, "leaf_index") - if err != nil { - 
return err - } - - mainnetFlag, err := convertMapValue[bool](data, "mainnet_flag") - if err != nil { - return err - } - - g.RollupIndex = rollupIndex - g.LeafIndex = leafIndex - g.MainnetFlag = mainnetFlag - - return nil -} - -// BridgeExit represents a token bridge exit -type BridgeExit struct { - LeafType LeafType `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount *big.Int `json:"amount"` - IsMetadataHashed bool `json:"-"` - Metadata []byte `json:"metadata"` -} - -func (b *BridgeExit) String() string { - res := fmt.Sprintf("LeafType: %s, DestinationNetwork: %d, DestinationAddress: %s, Amount: %s, Metadata: %s", - b.LeafType.String(), b.DestinationNetwork, b.DestinationAddress.String(), - b.Amount.String(), common.Bytes2Hex(b.Metadata)) - - if b.TokenInfo == nil { - res += ", TokenInfo: nil" - } else { - res += fmt.Sprintf(", TokenInfo: %s", b.TokenInfo.String()) - } - - return res -} - -// Hash returns a hash that uniquely identifies the bridge exit -func (b *BridgeExit) Hash() common.Hash { - if b.Amount == nil { - b.Amount = big.NewInt(0) - } - var metaDataHash []byte - if b.IsMetadataHashed { - metaDataHash = b.Metadata - } else { - metaDataHash = crypto.Keccak256(b.Metadata) - } - - return crypto.Keccak256Hash( - []byte{b.LeafType.Uint8()}, - cdkcommon.Uint32ToBytes(b.TokenInfo.OriginNetwork), - b.TokenInfo.OriginTokenAddress.Bytes(), - cdkcommon.Uint32ToBytes(b.DestinationNetwork), - b.DestinationAddress.Bytes(), - common.BigToHash(b.Amount).Bytes(), - metaDataHash, - ) -} - -// MarshalJSON is the implementation of the json.Marshaler interface -func (b *BridgeExit) MarshalJSON() ([]byte, error) { - var metadataString interface{} - if b.IsMetadataHashed { - metadataString = common.Bytes2Hex(b.Metadata) - } else if len(b.Metadata) > 0 { - metadataString = bytesToUints(b.Metadata) - } else { - metadataString = nil - } - - return 
json.Marshal(&struct { - LeafType string `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount string `json:"amount"` - Metadata interface{} `json:"metadata"` - }{ - LeafType: b.LeafType.String(), - TokenInfo: b.TokenInfo, - DestinationNetwork: b.DestinationNetwork, - DestinationAddress: b.DestinationAddress, - Amount: b.Amount.String(), - Metadata: metadataString, - }) -} - -func (b *BridgeExit) UnmarshalJSON(data []byte) error { - aux := &struct { - LeafType LeafType `json:"leaf_type"` - TokenInfo *TokenInfo `json:"token_info"` - DestinationNetwork uint32 `json:"dest_network"` - DestinationAddress common.Address `json:"dest_address"` - Amount string `json:"amount"` - Metadata interface{} `json:"metadata"` - }{} - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - b.LeafType = aux.LeafType - b.TokenInfo = aux.TokenInfo - b.DestinationNetwork = aux.DestinationNetwork - b.DestinationAddress = aux.DestinationAddress - var ok bool - if !strings.Contains(aux.Amount, nilStr) { - b.Amount, ok = new(big.Int).SetString(aux.Amount, base10) - if !ok { - return fmt.Errorf("failed to convert amount to big.Int: %s", aux.Amount) - } - } - if s, ok := aux.Metadata.(string); ok { - b.IsMetadataHashed = true - b.Metadata = common.Hex2Bytes(s) - } else if uints, ok := aux.Metadata.([]interface{}); ok { - b.IsMetadataHashed = false - b.Metadata = make([]byte, len(uints)) - for k, v := range uints { - value, ok := v.(float64) - if !ok { - return fmt.Errorf("failed to convert metadata to byte: %v", v) - } - b.Metadata[k] = byte(value) - } - } else { - b.Metadata = nil - } - return nil -} - -// bytesToUints converts a byte slice to a slice of uints -func bytesToUints(data []byte) []uint { - uints := make([]uint, len(data)) - for i, b := range data { - uints[i] = uint(b) - } - return uints -} - -// MerkleProof represents an inclusion proof of a 
leaf in a Merkle tree -type MerkleProof struct { - Root common.Hash `json:"root"` - Proof [types.DefaultHeight]common.Hash `json:"proof"` -} - -// MarshalJSON is the implementation of the json.Marshaler interface -func (m *MerkleProof) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Root common.Hash `json:"root"` - Proof map[string][types.DefaultHeight]common.Hash `json:"proof"` - }{ - Root: m.Root, - Proof: map[string][types.DefaultHeight]common.Hash{ - "siblings": m.Proof, - }, - }) -} - -func (m *MerkleProof) UnmarshalJSON(data []byte) error { - aux := &struct { - Root common.Hash `json:"root"` - Proof map[string][types.DefaultHeight]common.Hash `json:"proof"` - }{} - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - m.Root = aux.Root - m.Proof = aux.Proof["siblings"] - return nil -} - -// Hash returns the hash of the Merkle proof struct -func (m *MerkleProof) Hash() common.Hash { - proofsAsSingleSlice := make([]byte, 0) - - for _, proof := range m.Proof { - proofsAsSingleSlice = append(proofsAsSingleSlice, proof.Bytes()...) 
- } - - return crypto.Keccak256Hash( - m.Root.Bytes(), - proofsAsSingleSlice, - ) -} - -func (m *MerkleProof) String() string { - return fmt.Sprintf("Root: %s, Proof: %v", m.Root.String(), m.Proof) -} - -// L1InfoTreeLeafInner represents the inner part of the L1 info tree leaf -type L1InfoTreeLeafInner struct { - GlobalExitRoot common.Hash `json:"global_exit_root"` - BlockHash common.Hash `json:"block_hash"` - Timestamp uint64 `json:"timestamp"` -} - -// Hash returns the hash of the L1InfoTreeLeafInner struct -func (l *L1InfoTreeLeafInner) Hash() common.Hash { - return crypto.Keccak256Hash( - l.GlobalExitRoot.Bytes(), - l.BlockHash.Bytes(), - cdkcommon.Uint64ToBytes(l.Timestamp), - ) -} - -func (l *L1InfoTreeLeafInner) String() string { - return fmt.Sprintf("GlobalExitRoot: %s, BlockHash: %s, Timestamp: %d", - l.GlobalExitRoot.String(), l.BlockHash.String(), l.Timestamp) -} - -// L1InfoTreeLeaf represents the leaf of the L1 info tree -type L1InfoTreeLeaf struct { - L1InfoTreeIndex uint32 `json:"l1_info_tree_index"` - RollupExitRoot common.Hash `json:"rer"` - MainnetExitRoot common.Hash `json:"mer"` - Inner *L1InfoTreeLeafInner `json:"inner"` -} - -// Hash returns the hash of the L1InfoTreeLeaf struct -func (l *L1InfoTreeLeaf) Hash() common.Hash { - return l.Inner.Hash() -} - -func (l *L1InfoTreeLeaf) String() string { - return fmt.Sprintf("L1InfoTreeIndex: %d, RollupExitRoot: %s, MainnetExitRoot: %s, Inner: %s", - l.L1InfoTreeIndex, - l.RollupExitRoot.String(), - l.MainnetExitRoot.String(), - l.Inner.String(), - ) -} - -// Claim is the interface that will be implemented by the different types of claims -type Claim interface { - Type() string - Hash() common.Hash - MarshalJSON() ([]byte, error) - String() string -} - -// ClaimFromMainnnet represents a claim originating from the mainnet -type ClaimFromMainnnet struct { - ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf 
`json:"l1_leaf"` -} - -// Type is the implementation of Claim interface -func (c ClaimFromMainnnet) Type() string { - return "Mainnet" -} - -// MarshalJSON is the implementation of Claim interface -func (c *ClaimFromMainnnet) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Child map[string]interface{} `json:"Mainnet"` - }{ - Child: map[string]interface{}{ - "proof_leaf_mer": c.ProofLeafMER, - "proof_ger_l1root": c.ProofGERToL1Root, - "l1_leaf": c.L1Leaf, - }, - }) -} - -func (c *ClaimFromMainnnet) UnmarshalJSON(data []byte) error { - if string(data) == nullStr { - return nil - } - - claimData := &struct { - Child struct { - ProofLeafMER *MerkleProof `json:"proof_leaf_mer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` - } `json:"Mainnet"` - }{} - if err := json.Unmarshal(data, &claimData); err != nil { - return fmt.Errorf("failed to unmarshal the subobject: %w", err) - } - c.ProofLeafMER = claimData.Child.ProofLeafMER - c.ProofGERToL1Root = claimData.Child.ProofGERToL1Root - c.L1Leaf = claimData.Child.L1Leaf - - return nil -} - -// Hash is the implementation of Claim interface -func (c *ClaimFromMainnnet) Hash() common.Hash { - return crypto.Keccak256Hash( - c.ProofLeafMER.Hash().Bytes(), - c.ProofGERToL1Root.Hash().Bytes(), - c.L1Leaf.Hash().Bytes(), - ) -} - -func (c *ClaimFromMainnnet) String() string { - return fmt.Sprintf("ProofLeafMER: %s, ProofGERToL1Root: %s, L1Leaf: %s", - c.ProofLeafMER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) -} - -// ClaimFromRollup represents a claim originating from a rollup -type ClaimFromRollup struct { - ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` - ProofLERToRER *MerkleProof `json:"proof_ler_rer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` -} - -// Type is the implementation of Claim interface -func (c ClaimFromRollup) Type() string { - return "Rollup" -} - -// MarshalJSON is the 
implementation of Claim interface -func (c *ClaimFromRollup) MarshalJSON() ([]byte, error) { - return json.Marshal(&struct { - Child map[string]interface{} `json:"Rollup"` - }{ - Child: map[string]interface{}{ - "proof_leaf_ler": c.ProofLeafLER, - "proof_ler_rer": c.ProofLERToRER, - "proof_ger_l1root": c.ProofGERToL1Root, - "l1_leaf": c.L1Leaf, - }, - }) -} - -func (c *ClaimFromRollup) UnmarshalJSON(data []byte) error { - if string(data) == nullStr { - return nil - } - - claimData := &struct { - Child struct { - ProofLeafLER *MerkleProof `json:"proof_leaf_ler"` - ProofLERToRER *MerkleProof `json:"proof_ler_rer"` - ProofGERToL1Root *MerkleProof `json:"proof_ger_l1root"` - L1Leaf *L1InfoTreeLeaf `json:"l1_leaf"` - } `json:"Rollup"` - }{} - - if err := json.Unmarshal(data, &claimData); err != nil { - return fmt.Errorf("failed to unmarshal the subobject: %w", err) - } - c.ProofLeafLER = claimData.Child.ProofLeafLER - c.ProofLERToRER = claimData.Child.ProofLERToRER - c.ProofGERToL1Root = claimData.Child.ProofGERToL1Root - c.L1Leaf = claimData.Child.L1Leaf - - return nil -} - -// Hash is the implementation of Claim interface -func (c *ClaimFromRollup) Hash() common.Hash { - return crypto.Keccak256Hash( - c.ProofLeafLER.Hash().Bytes(), - c.ProofLERToRER.Hash().Bytes(), - c.ProofGERToL1Root.Hash().Bytes(), - c.L1Leaf.Hash().Bytes(), - ) -} - -func (c *ClaimFromRollup) String() string { - return fmt.Sprintf("ProofLeafLER: %s, ProofLERToRER: %s, ProofGERToL1Root: %s, L1Leaf: %s", - c.ProofLeafLER.String(), c.ProofLERToRER.String(), c.ProofGERToL1Root.String(), c.L1Leaf.String()) -} - -// ClaimSelector is a helper struct that allow to decice which type of claim to unmarshal -type ClaimSelector struct { - obj Claim -} - -func (c *ClaimSelector) GetObject() Claim { - return c.obj -} - -func (c *ClaimSelector) UnmarshalJSON(data []byte) error { - var obj map[string]interface{} - if string(data) == nullStr { - return nil - } - if err := json.Unmarshal(data, &obj); err != nil { - 
return err - } - var ok bool - if _, ok = obj["Mainnet"]; ok { - c.obj = &ClaimFromMainnnet{} - } else if _, ok = obj["Rollup"]; ok { - c.obj = &ClaimFromRollup{} - } else { - return errors.New("invalid claim type") - } - - return json.Unmarshal(data, &c.obj) -} - -// ImportedBridgeExit represents a token bridge exit originating on another network but claimed on the current network. -type ImportedBridgeExit struct { - BridgeExit *BridgeExit `json:"bridge_exit"` - ClaimData Claim `json:"claim_data"` - GlobalIndex *GlobalIndex `json:"global_index"` -} - -func (c *ImportedBridgeExit) String() string { - var res string - - if c.BridgeExit == nil { - res = "BridgeExit: nil" - } else { - res = fmt.Sprintf("BridgeExit: %s", c.BridgeExit.String()) - } - - if c.GlobalIndex == nil { - res += ", GlobalIndex: nil" - } else { - res += fmt.Sprintf(", GlobalIndex: %s", c.GlobalIndex.String()) - } - - res += fmt.Sprintf("ClaimData: %s", c.ClaimData.String()) - - return res -} - -func (c *ImportedBridgeExit) UnmarshalJSON(data []byte) error { - aux := &struct { - BridgeExit *BridgeExit `json:"bridge_exit"` - ClaimData ClaimSelector `json:"claim_data"` - GlobalIndex *GlobalIndex `json:"global_index"` - }{} - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - c.BridgeExit = aux.BridgeExit - c.ClaimData = aux.ClaimData.GetObject() - c.GlobalIndex = aux.GlobalIndex - return nil -} - -// Hash returns a hash that uniquely identifies the imported bridge exit -func (c *ImportedBridgeExit) Hash() common.Hash { - return crypto.Keccak256Hash( - c.BridgeExit.Hash().Bytes(), - c.ClaimData.Hash().Bytes(), - c.GlobalIndex.Hash().Bytes(), - ) -} - -var _ error = (*GenericError)(nil) - -type GenericError struct { - Key string - Value string -} - -func (p *GenericError) Error() string { - return fmt.Sprintf("[Agglayer Error] %s: %s", p.Key, p.Value) -} - -// CertificateHeader is the structure returned by the interop_getCertificateHeader RPC call -type CertificateHeader struct { - 
NetworkID uint32 `json:"network_id"` - Height uint64 `json:"height"` - EpochNumber *uint64 `json:"epoch_number"` - CertificateIndex *uint64 `json:"certificate_index"` - CertificateID common.Hash `json:"certificate_id"` - PreviousLocalExitRoot *common.Hash `json:"prev_local_exit_root,omitempty"` - NewLocalExitRoot common.Hash `json:"new_local_exit_root"` - Status CertificateStatus `json:"status"` - Metadata common.Hash `json:"metadata"` - Error error `json:"-"` -} - -// ID returns a string with the ident of this cert (height/certID) -func (c *CertificateHeader) ID() string { - if c == nil { - return nilStr - } - return fmt.Sprintf("%d/%s", c.Height, c.CertificateID.String()) -} - -func (c *CertificateHeader) String() string { - if c == nil { - return nilStr - } - errors := "" - if c.Error != nil { - errors = c.Error.Error() - } - previousLocalExitRoot := nilStr - if c.PreviousLocalExitRoot != nil { - previousLocalExitRoot = c.PreviousLocalExitRoot.String() - } - return fmt.Sprintf("Height: %d, CertificateID: %s, PreviousLocalExitRoot: %s, NewLocalExitRoot: %s. 
Status: %s."+ - " Errors: [%s]", - c.Height, c.CertificateID.String(), previousLocalExitRoot, c.NewLocalExitRoot.String(), c.Status.String(), errors) -} - -func (c *CertificateHeader) UnmarshalJSON(data []byte) error { - // we define an alias to avoid infinite recursion - type Alias CertificateHeader - aux := &struct { - Status interface{} `json:"status"` - *Alias - }{ - Alias: (*Alias)(c), - } - - if err := json.Unmarshal(data, &aux); err != nil { - return err - } - - // Process Status field - switch status := aux.Status.(type) { - case string: // certificate not InError - if err := c.Status.UnmarshalJSON([]byte(status)); err != nil { - return err - } - case map[string]interface{}: // certificate has errors - inErrMap, err := convertMapValue[map[string]interface{}](status, "InError") - if err != nil { - return err - } - - inErrDataMap, err := convertMapValue[map[string]interface{}](inErrMap, "error") - if err != nil { - return err - } - - var agglayerErr error - - for errKey, errValueRaw := range inErrDataMap { - if errValueJSON, err := json.Marshal(errValueRaw); err != nil { - agglayerErr = &GenericError{ - Key: errKey, - Value: fmt.Sprintf("failed to marshal the agglayer error to the JSON. Raw value: %+v\nReason: %+v", - errValueRaw, err), - } - } else { - agglayerErr = &GenericError{Key: errKey, Value: string(errValueJSON)} - } - } - - c.Status = InError - c.Error = agglayerErr - default: - return errors.New("invalid status type") - } - - return nil -} - -// convertMapValue converts the value of a key in a map to a target type. 
-func convertMapValue[T any](data map[string]interface{}, key string) (T, error) { - value, ok := data[key] - if !ok { - var zero T - return zero, fmt.Errorf("key %s not found in map", key) - } - - // Try a direct type assertion - if convertedValue, ok := value.(T); ok { - return convertedValue, nil - } - - // If direct assertion fails, handle numeric type conversions - var target T - targetType := reflect.TypeOf(target) - - // Check if value is a float64 (default JSON number type) and target is a numeric type - if floatValue, ok := value.(float64); ok && targetType.Kind() >= reflect.Int && targetType.Kind() <= reflect.Uint64 { - convertedValue, err := convertNumeric(floatValue, targetType) - if err != nil { - return target, fmt.Errorf("conversion error for key %s: %w", key, err) - } - return convertedValue.(T), nil //nolint:forcetypeassert - } - - return target, fmt.Errorf("value of key %s is not of type %T", key, target) -} - -// convertNumeric converts a float64 to the specified numeric type. 
-func convertNumeric(value float64, targetType reflect.Type) (interface{}, error) { - switch targetType.Kind() { - case reflect.Int: - return int(value), nil - case reflect.Int8: - return int8(value), nil - case reflect.Int16: - return int16(value), nil - case reflect.Int32: - return int32(value), nil - case reflect.Int64: - return int64(value), nil - case reflect.Uint: - return uint(value), nil - case reflect.Uint8: - return uint8(value), nil - case reflect.Uint16: - return uint16(value), nil - case reflect.Uint32: - return uint32(value), nil - case reflect.Uint64: - return uint64(value), nil - case reflect.Float32: - return float32(value), nil - case reflect.Float64: - return value, nil - default: - return nil, fmt.Errorf("unsupported target type %v", targetType) - } -} - -// ClockConfiguration represents the configuration of the epoch clock -// returned by the interop_GetEpochConfiguration RPC call -type ClockConfiguration struct { - EpochDuration uint64 `json:"epoch_duration"` - GenesisBlock uint64 `json:"genesis_block"` -} - -func (c ClockConfiguration) String() string { - return fmt.Sprintf("EpochDuration: %d, GenesisBlock: %d", c.EpochDuration, c.GenesisBlock) -} diff --git a/agglayer/types_helpers_test.go b/agglayer/types_helpers_test.go deleted file mode 100644 index 1bd03a3fe..000000000 --- a/agglayer/types_helpers_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package agglayer - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -// Helper function to create a dummy TokenInfo -func createDummyTokenInfo(t *testing.T) *TokenInfo { - t.Helper() - - return &TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x2345"), - } -} - -// Helper function to create a dummy GlobalIndex -func createDummyGlobalIndex(t *testing.T) *GlobalIndex { - t.Helper() - - return &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 10, - LeafIndex: 1, - } -} - -// Helper function to create a dummy 
Claim -func createDummyClaim(t *testing.T) *ClaimFromMainnnet { - t.Helper() - - return &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1234"), - Proof: [common.HashLength]common.Hash{ - common.HexToHash("0x1234"), - common.HexToHash("0x5678"), - }, - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x5678"), - Proof: [common.HashLength]common.Hash{ - common.HexToHash("0x5678"), - common.HexToHash("0x1234"), - }, - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x987654321"), - MainnetExitRoot: common.HexToHash("0x123456789"), - Inner: &L1InfoTreeLeafInner{}, - }, - } -} - -// Helper function to create a dummy proof -func createDummyProof(t *testing.T) types.Proof { - t.Helper() - - proof := types.Proof{} - - for i := 0; i < int(types.DefaultHeight); i++ { - proof[i] = common.HexToHash(fmt.Sprintf("0x%x", i)) - } - - return proof -} diff --git a/agglayer/types_test.go b/agglayer/types_test.go deleted file mode 100644 index 9e7e4d4b5..000000000 --- a/agglayer/types_test.go +++ /dev/null @@ -1,1203 +0,0 @@ -package agglayer - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "testing" - - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/require" -) - -const ( - expectedSignedCertificateEmptyMetadataJSON = 
`{"network_id":1,"height":1,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - expectedSignedCertificateMetadataJSON = `{"network_id":1,"height":1,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000000","bridge_exits":[{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":[1,2,3]}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":null,"dest_network":0,"dest_address":"0x0000000000000000000000000000000000000000","amount":"1","metadata":null},"claim_data":null,"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":1}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000000","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000000","s":"0x0000000000000000000000000000000000000000000000000000000000000000","odd_y_parity":false}}` - fullCertificateJSON = 
`{"network_id":1,"height":0,"prev_local_exit_root":"0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757","new_local_exit_root":"0x79011be874bf6f229d8473eb251aa932210bc3ab843a316492d5bc0e4b9e945b","bridge_exits":[{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":"0x0000000000000000000000000000000000000000"},"dest_network":0,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"10000005400000000","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":"0x0000000000000000000000000000000000000000"},"dest_network":1,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"20000005400000000","metadata":null},"claim_data":{"Mainnet":{"l1_leaf":{"l1_info_tree_index":3,"rer":"0x0000000000000000000000000000000000000000000000000000000000000000","mer":"0x34c7e5206c4c793171805029b5a3a5c6f2d3e5344731cd69912142dc083768bf","inner":{"global_exit_root":"0xefb4efc883a8d7ab7c414684a4f44fac0f522d5eef9144dbad85a6b7756d770d","block_hash":"0x02224ad091ae2762001610174fb70885734761b3518aca77b8af63308f3c0b67","timestamp":1734434917}},"proof_ger_l1root":{"root":"0x73011c89c4cb976b1feeec2185dba22ecdac6d424afeb83ed5cacfdaae735e95","proof":{"siblings":["0x628d0adbb4d8c80a15f0743fa32385efd0798189228dd83c73e09409d94c2273","0x0ffee9fcedabc3f74d1b86e97c2b3039f7b2c617a100d6ca1ad5c0e613939b05","0xe7dcc1ef21d4705f16b0c2976a5358719a60361f2435bd342e3f97c287ae5040","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe01
6e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_leaf_mer":{"root":"0x34c7e5206c4c793171805029b5a3a5c6f2d3e5344731cd69912142dc083768bf","proof":{"siblings":["0x7e5dddb55a966fa6ccd6d470bb326a4fcef563567d6897c45b7ed885de710757","0x4b274df9344e005bfd46536d791100a85234bef4fab0348d1b2ffc0e7a709d33","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d
1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}}}},"global_index":{"mainnet_flag":true,"rollup_index":0,"leaf_index":2}},{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":0,"origin_token_address":
"0x0000000000000000000000000000000000000000"},"dest_network":1,"dest_address":"0xbece3a31343c6019cde0d5a4df2af8df17ebcb0f","amount":"1234567","metadata":null},"claim_data":{"Rollup":{"l1_leaf":{"l1_info_tree_index":4,"rer":"0x33267c0646fee979e59af1cd62f9e46cd0917f62aba82658e1a92a50e1d7b4d1","mer":"0x34c7e5206c4c793171805029b5a3a5c6f2d3e5344731cd69912142dc083768bf","inner":{"global_exit_root":"0x6df4684b75569ffa9c0d352d1293c5d98950ecc1ea34226194842d10b14f47d0","block_hash":"0x52bbc4079dcaaac2f6e950a0fe5aed613473faf48a721709ce347c4ddfe0b50d","timestamp":1734435263}},"proof_ger_l1root":{"root":"0x73011c89c4cb976b1feeec2185dba22ecdac6d424afeb83ed5cacfdaae735e95","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0x20a8c649fbea68114dca04c42bf16e23c6b39d4eafcc54378c5b7516c3a3c9d2","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","
0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_leaf_ler":{"root":"0x156ab7795d0bb31ed548c43f90e71b8f06f71e5776a5ba444f3f3cb0935b4647","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf3022
56adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}},"proof_ler_rer":{"root":"0x33267c0646fee979e59af1cd62f9e46cd0917f62aba82658e1a92a50e1d7b4d1","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5","0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30","0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85","0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344","0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d","0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968","0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83","0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af","0xcefad4e508c098b9a7e1d8feb19955fb02ba9
675585078710969d3440f5054e0","0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5","0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892","0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c","0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb","0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc","0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2","0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f","0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a","0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0","0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0","0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2","0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9","0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377","0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652","0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef","0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d","0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0","0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e","0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e","0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322","0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735","0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"]}}}},"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":0}}],"metadata":"0x0100000000000000010000047867616580000000000000000000000000000000","signature":{"r":"0x4798dc4c299dfa4299c1992624271e2f1953cac3a909742ec4ca9549582c9273","s":"0x29762f0418ab0fc6019c1ea4c0722fe19477708e29e3d1416727339deba5660e","odd_y_parity":false}}` -) - -func TestBridgeExit_Hash(t *testing.T) { - t.Parallel() - - MetadaHash := common.HexToHash("0x1234") - 
bridge := BridgeExit{ - TokenInfo: &TokenInfo{}, - IsMetadataHashed: true, - Metadata: MetadaHash[:], - } - require.Equal(t, "0xaa57e4bf430fe25ca5068f9e1a25e8aef15744905cdf7635e0d5a468bd26bb18", - bridge.Hash().String(), "use the hashed metadata, instead of calculating hash") - - bridge.IsMetadataHashed = false - require.Equal(t, "0x79d5362ad609e06e022277ede4fd10899dc189c0ed56e1a2c6982d0563fe1be7", - bridge.Hash().String(), "metadata is not hashed, calculate hash") - - bridge.IsMetadataHashed = false - bridge.Metadata = []byte{} - require.Equal(t, "0xe3e297278c7df4ae4f235be10155ac62c53b08e2a14ed09b7dd6b688952ee883", - bridge.Hash().String(), "metadata is not hashed and it's empty, calculate hash") - - bridge.IsMetadataHashed = true - bridge.Metadata = []byte{} - require.Equal(t, "0x51980562e41978f15369c21f26920284ac6836d53b02cd89edf4fedc97e68215", - bridge.Hash().String(), "metadata is a hashed and it's empty,use it") -} - -func TestGenericError_Error(t *testing.T) { - t.Parallel() - - err := GenericError{"test", "value"} - require.Equal(t, "[Agglayer Error] test: value", err.Error()) -} - -func TestCertificateHeader_ID(t *testing.T) { - t.Parallel() - - certificate := CertificateHeader{ - Height: 1, - CertificateID: common.HexToHash("0x123"), - } - require.Equal(t, "1/0x0000000000000000000000000000000000000000000000000000000000000123", certificate.ID()) - - var certNil *CertificateHeader - require.Equal(t, "nil", certNil.ID()) -} - -func TestCertificateHeaderString(t *testing.T) { - t.Parallel() - - certificate := CertificateHeader{ - Height: 1, - CertificateID: common.HexToHash("0x123"), - } - require.Equal(t, "Height: 1, CertificateID: 0x0000000000000000000000000000000000000000000000000000000000000123, PreviousLocalExitRoot: nil, NewLocalExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000000. Status: Pending. 
Errors: []", - certificate.String()) - - var certNil *CertificateHeader - require.Equal(t, "nil", certNil.String()) -} - -func TestMarshalJSON(t *testing.T) { - t.Parallel() - - t.Run("MarshalJSON with empty proofs", func(t *testing.T) { - t.Parallel() - - cert := SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.Hash{}, - NewLocalExitRoot: common.Hash{}, - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - DestinationAddress: common.Address{}, - Amount: big.NewInt(1), - }, - }, - ImportedBridgeExits: []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - DestinationAddress: common.Address{}, - Amount: big.NewInt(1), - Metadata: []byte{}, - }, - ClaimData: nil, - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 1, - }, - }, - }, - }, - - Signature: &Signature{ - R: common.Hash{}, - S: common.Hash{}, - OddParity: false, - }, - } - data, err := json.Marshal(cert) - require.NoError(t, err) - log.Info(string(data)) - require.Equal(t, expectedSignedCertificateEmptyMetadataJSON, string(data)) - - cert.BridgeExits[0].Metadata = []byte{1, 2, 3} - data, err = json.Marshal(cert) - require.NoError(t, err) - log.Info(string(data)) - require.Equal(t, expectedSignedCertificateMetadataJSON, string(data)) - }) - - t.Run("MarshalJSON with proofs", func(t *testing.T) { - t.Parallel() - - cert := SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 11, - Height: 111, - PrevLocalExitRoot: common.HexToHash("0x111"), - NewLocalExitRoot: common.HexToHash("0x222"), - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x123")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(1000), - Metadata: []byte{}, // we leave it empty on purpose to see when marshaled it will be null - }, - }, - ImportedBridgeExits: 
[]*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(2000), - Metadata: []byte{0x03, 0x04}, - }, - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x333"), - Proof: createDummyProof(t), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x444"), - Proof: createDummyProof(t), - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x555"), - MainnetExitRoot: common.HexToHash("0x123456"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x777"), - BlockHash: common.HexToHash("0x888"), - Timestamp: 12345678, - }, - }, - }, - }, - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0xabcdef"), - Amount: big.NewInt(2201), - Metadata: []byte{0x05, 0x08}, - }, - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 2, - }, - ClaimData: &ClaimFromRollup{ - ProofLeafLER: &MerkleProof{ - Root: common.HexToHash("0x333"), - Proof: createDummyProof(t), - }, - ProofLERToRER: &MerkleProof{ - Root: common.HexToHash("0x444"), - Proof: createDummyProof(t), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x555"), - Proof: createDummyProof(t), - }, - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 2, - RollupExitRoot: common.HexToHash("0x532"), - MainnetExitRoot: common.HexToHash("0x654321"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x777"), - BlockHash: common.HexToHash("0x888"), - Timestamp: 12345678, - }, - }, - }, - }, - }, - Metadata: 
common.HexToHash("0xdef"), - }, - Signature: &Signature{ - R: common.HexToHash("0x111"), - S: common.HexToHash("0x222"), - OddParity: true, - }, - } - - expectedJSON := `{"network_id":11,"height":111,"prev_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000111","new_local_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000222","bridge_exits":[{"leaf_type":"Transfer","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000123"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000000456","amount":"1000","metadata":null}],"imported_bridge_exits":[{"bridge_exit":{"leaf_type":"Message","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000789"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000000abc","amount":"2000","metadata":[3,4]},"claim_data":{"Mainnet":{"l1_leaf":{"l1_info_tree_index":1,"rer":"0x0000000000000000000000000000000000000000000000000000000000000555","mer":"0x0000000000000000000000000000000000000000000000000000000000123456","inner":{"global_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000777","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000888","timestamp":12345678}},"proof_ger_l1root":{"root":"0x0000000000000000000000000000000000000000000000000000000000000444","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x00000
00000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_leaf_mer":{"root":"0x0000000000000000000000000000000000000000000000000000000000000333","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x000000000000000000000000
0000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}}}},"global_index":{"main
net_flag":true,"rollup_index":0,"leaf_index":1}},{"bridge_exit":{"leaf_type":"Transfer","token_info":{"origin_network":1,"origin_token_address":"0x0000000000000000000000000000000000000789"},"dest_network":2,"dest_address":"0x0000000000000000000000000000000000abcdef","amount":"2201","metadata":[5,8]},"claim_data":{"Rollup":{"l1_leaf":{"l1_info_tree_index":2,"rer":"0x0000000000000000000000000000000000000000000000000000000000000532","mer":"0x0000000000000000000000000000000000000000000000000000000000654321","inner":{"global_exit_root":"0x0000000000000000000000000000000000000000000000000000000000000777","block_hash":"0x0000000000000000000000000000000000000000000000000000000000000888","timestamp":12345678}},"proof_ger_l1root":{"root":"0x0000000000000000000000000000000000000000000000000000000000000555","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010"
,"0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_leaf_ler":{"root":"0x0000000000000000000000000000000000000000000000000000000000000333","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x0000000000000000000000000000000000000000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000
000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}},"proof_ler_rer":{"root":"0x0000000000000000000000000000000000000000000000000000000000000444","proof":{"siblings":["0x0000000000000000000000000000000000000000000000000000000000000000","0x0000000000000000000000000000000000000000000000000000000000000001","0x0000000000000000000000000000000000000000000000000000000000000002","0x0000000000000000000000000000000000000000000000000000000000000003","0x0000000000000000000000000000000000000000000000000000000000000004","0x0000000000000000000000000000000000000000000000000000000000000005","0x0000000000000000000000000000000000000000000000000000000000000006","0x00000000000000000000000000000000000
00000000000000000000000000007","0x0000000000000000000000000000000000000000000000000000000000000008","0x0000000000000000000000000000000000000000000000000000000000000009","0x000000000000000000000000000000000000000000000000000000000000000a","0x000000000000000000000000000000000000000000000000000000000000000b","0x000000000000000000000000000000000000000000000000000000000000000c","0x000000000000000000000000000000000000000000000000000000000000000d","0x000000000000000000000000000000000000000000000000000000000000000e","0x000000000000000000000000000000000000000000000000000000000000000f","0x0000000000000000000000000000000000000000000000000000000000000010","0x0000000000000000000000000000000000000000000000000000000000000011","0x0000000000000000000000000000000000000000000000000000000000000012","0x0000000000000000000000000000000000000000000000000000000000000013","0x0000000000000000000000000000000000000000000000000000000000000014","0x0000000000000000000000000000000000000000000000000000000000000015","0x0000000000000000000000000000000000000000000000000000000000000016","0x0000000000000000000000000000000000000000000000000000000000000017","0x0000000000000000000000000000000000000000000000000000000000000018","0x0000000000000000000000000000000000000000000000000000000000000019","0x000000000000000000000000000000000000000000000000000000000000001a","0x000000000000000000000000000000000000000000000000000000000000001b","0x000000000000000000000000000000000000000000000000000000000000001c","0x000000000000000000000000000000000000000000000000000000000000001d","0x000000000000000000000000000000000000000000000000000000000000001e","0x000000000000000000000000000000000000000000000000000000000000001f"]}}}},"global_index":{"mainnet_flag":false,"rollup_index":1,"leaf_index":2}}],"metadata":"0x0000000000000000000000000000000000000000000000000000000000000def","signature":{"r":"0x0000000000000000000000000000000000000000000000000000000000000111","s":"0x000000000000000000000000000000000000000000000000000000000000022
2","odd_y_parity":true}}` - - data, err := json.Marshal(cert) - require.NoError(t, err) - require.Equal(t, expectedJSON, string(data)) - - require.Equal(t, "0xda355a601420351a0c950ebb34b6278580978d7b6a215338531d543a8f03574a", cert.Hash().String()) - require.Equal(t, "0x2f01782930cbf2bc2ab4ec16759a2288ad7df865dea387aadf55f96136269cf4", cert.BridgeExits[0].Hash().String()) - require.Equal(t, "0xac83b106ad2ca491828d49613c8356a15e3de298c794e1abd9632dc4d03b7c79", cert.ImportedBridgeExits[0].Hash().String()) - require.Equal(t, "0x6d9dc59396058ef7845fd872a87e77f1a58d010a760957f8814bd3d2ca5914a1", cert.ImportedBridgeExits[1].Hash().String()) - }) -} - -func TestSignedCertificate_Copy(t *testing.T) { - t.Parallel() - - t.Run("copy with non-nil fields", func(t *testing.T) { - t.Parallel() - - original := &SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: [32]byte{0x01}, - NewLocalExitRoot: [32]byte{0x02}, - BridgeExits: []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x123")}, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(1000), - Metadata: []byte{0x01, 0x02}, - }, - }, - ImportedBridgeExits: []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - TokenInfo: &TokenInfo{OriginNetwork: 1, OriginTokenAddress: common.HexToAddress("0x789")}, - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(2000), - Metadata: []byte{0x03, 0x04}, - }, - ClaimData: &ClaimFromMainnnet{}, - GlobalIndex: &GlobalIndex{MainnetFlag: true, RollupIndex: 1, LeafIndex: 2}, - }, - }, - Metadata: common.HexToHash("0xdef"), - }, - Signature: &Signature{ - R: common.HexToHash("0x111"), - S: common.HexToHash("0x222"), - OddParity: true, - }, - } - - certificateCopy := original.CopyWithDefaulting() - - require.NotNil(t, certificateCopy) - require.NotSame(t, 
original, certificateCopy) - require.NotSame(t, original.Certificate, certificateCopy.Certificate) - require.Same(t, original.Signature, certificateCopy.Signature) - require.Equal(t, original, certificateCopy) - }) - - t.Run("copy with nil BridgeExits, ImportedBridgeExits and Signature", func(t *testing.T) { - t.Parallel() - - original := &SignedCertificate{ - Certificate: &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: [32]byte{0x01}, - NewLocalExitRoot: [32]byte{0x02}, - BridgeExits: nil, - ImportedBridgeExits: nil, - Metadata: common.HexToHash("0xdef"), - }, - Signature: nil, - } - - certificateCopy := original.CopyWithDefaulting() - - require.NotNil(t, certificateCopy) - require.NotSame(t, original, certificateCopy) - require.NotSame(t, original.Certificate, certificateCopy.Certificate) - require.NotNil(t, certificateCopy.Signature) - require.Equal(t, original.NetworkID, certificateCopy.NetworkID) - require.Equal(t, original.Height, certificateCopy.Height) - require.Equal(t, original.PrevLocalExitRoot, certificateCopy.PrevLocalExitRoot) - require.Equal(t, original.NewLocalExitRoot, certificateCopy.NewLocalExitRoot) - require.Equal(t, original.Metadata, certificateCopy.Metadata) - require.NotNil(t, certificateCopy.BridgeExits) - require.NotNil(t, certificateCopy.ImportedBridgeExits) - require.Empty(t, certificateCopy.BridgeExits) - require.Empty(t, certificateCopy.ImportedBridgeExits) - }) -} - -func TestGlobalIndex_UnmarshalFromMap(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - data map[string]interface{} - want *GlobalIndex - wantErr bool - }{ - { - name: "valid data", - data: map[string]interface{}{ - "rollup_index": uint32(0), - "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: &GlobalIndex{ - RollupIndex: 0, - LeafIndex: 2, - MainnetFlag: true, - }, - wantErr: false, - }, - { - name: "missing rollup_index", - data: map[string]interface{}{ - "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: 
&GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid rollup_index type", - data: map[string]interface{}{ - "rollup_index": "invalid", - "leaf_index": uint32(2), - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "missing leaf_index", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid leaf_index type", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": "invalid", - "mainnet_flag": true, - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "missing mainnet_flag", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": uint32(2), - }, - want: &GlobalIndex{}, - wantErr: true, - }, - { - name: "invalid mainnet_flag type", - data: map[string]interface{}{ - "rollup_index": uint32(1), - "leaf_index": uint32(2), - "mainnet_flag": "invalid", - }, - want: &GlobalIndex{}, - wantErr: true, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - g := &GlobalIndex{} - err := g.UnmarshalFromMap(tt.data) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.want, g) - } - }) - } -} - -func TestUnmarshalCertificateHeader_UnknownError(t *testing.T) { - t.Parallel() - - rawCertificateHeader := `{ - "network_id": 14, - "height": 0, - "epoch_number": null, - "certificate_index": null, - "certificate_id": "0x3af88c9ca106822bd141fdc680dcb888f4e9d4997fad1645ba3d5d747059eb32", - "new_local_exit_root": "0x625e889ced3c31277c6653229096374d396a2fd3564a8894aaad2ff935d2fc8c", - "metadata": "0x0000000000000000000000000000000000000000000000000000000000002f3d", - "status": { - "InError": { - "error": { - "ProofVerificationFailed": { - "Plonk": "the verifying key does not match the inner plonk bn254 proof's committed verifying key" - } - } - } - } - }` - - var result *CertificateHeader - err := 
json.Unmarshal([]byte(rawCertificateHeader), &result) - require.NoError(t, err) - require.NotNil(t, result) - - expectedErr := &GenericError{ - Key: "ProofVerificationFailed", - Value: "{\"Plonk\":\"the verifying key does not match the inner plonk bn254 proof's committed verifying key\"}", - } - - require.Equal(t, expectedErr, result.Error) -} - -func TestConvertNumeric(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - value float64 - target reflect.Type - expected interface{} - expectedErr error - }{ - // Integer conversions - {"FloatToInt", 42.5, reflect.TypeOf(int(0)), int(42), nil}, - {"FloatToInt8", 127.5, reflect.TypeOf(int8(0)), int8(127), nil}, - {"FloatToInt16", 32767.5, reflect.TypeOf(int16(0)), int16(32767), nil}, - {"FloatToInt32", 2147483647.5, reflect.TypeOf(int32(0)), int32(2147483647), nil}, - {"FloatToInt64", -10000000000000000.9, reflect.TypeOf(int64(0)), int64(-10000000000000000), nil}, - - // Unsigned integer conversions - {"FloatToUint", 42.5, reflect.TypeOf(uint(0)), uint(42), nil}, - {"FloatToUint8", 255.5, reflect.TypeOf(uint8(0)), uint8(255), nil}, - {"FloatToUint16", 65535.5, reflect.TypeOf(uint16(0)), uint16(65535), nil}, - {"FloatToUint32", 4294967295.5, reflect.TypeOf(uint32(0)), uint32(4294967295), nil}, - {"FloatToUint64", 10000000000000000.9, reflect.TypeOf(uint64(0)), uint64(10000000000000000), nil}, - - // Float conversions - {"FloatToFloat32", 3.14, reflect.TypeOf(float32(0)), float32(3.14), nil}, - {"FloatToFloat64", 3.14, reflect.TypeOf(float64(0)), float64(3.14), nil}, - - // Unsupported type - {"UnsupportedType", 3.14, reflect.TypeOf("string"), nil, errors.New("unsupported target type string")}, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result, err := convertNumeric(tt.value, tt.target) - if tt.expectedErr != nil { - require.ErrorContains(t, err, tt.expectedErr.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.expected, 
result) - }) - } -} - -func TestCertificate_Hash(t *testing.T) { - t.Parallel() - - // Test inputs - prevLocalExitRoot := [common.HashLength]byte{} - newLocalExitRoot := [common.HashLength]byte{} - copy(prevLocalExitRoot[:], bytes.Repeat([]byte{0x01}, common.HashLength)) - copy(newLocalExitRoot[:], bytes.Repeat([]byte{0x02}, common.HashLength)) - - // Create dummy BridgeExits - bridgeExits := []*BridgeExit{ - { - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"), - Amount: big.NewInt(100), - Metadata: []byte("metadata1"), - }, - { - LeafType: LeafTypeMessage, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000002"), - Amount: big.NewInt(200), - Metadata: []byte("metadata2"), - }, - } - - // Create dummy ImportedBridgeExits - importedBridgeExits := []*ImportedBridgeExit{ - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000003"), - Amount: big.NewInt(300), - Metadata: []byte("metadata3"), - }, - ClaimData: createDummyClaim(t), - GlobalIndex: createDummyGlobalIndex(t), - }, - { - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0x0000000000000000000000000000000000000004"), - Amount: big.NewInt(400), - Metadata: []byte("metadata4"), - }, - ClaimData: createDummyClaim(t), - GlobalIndex: createDummyGlobalIndex(t), - }, - } - - metadata := common.HexToHash("0x123456789abcdef123456789abcdef123456789abcdef123456789abcdef1234") - - // Create the certificate - certificate := &Certificate{ - NetworkID: 1, - Height: 100, - PrevLocalExitRoot: prevLocalExitRoot, - NewLocalExitRoot: newLocalExitRoot, - 
BridgeExits: bridgeExits, - ImportedBridgeExits: importedBridgeExits, - Metadata: metadata, - } - - // Manually calculate the expected hash - bridgeExitsHashes := [][]byte{ - bridgeExits[0].Hash().Bytes(), - bridgeExits[1].Hash().Bytes(), - } - importedBridgeExitsHashes := [][]byte{ - importedBridgeExits[0].Hash().Bytes(), - importedBridgeExits[1].Hash().Bytes(), - } - - bridgeExitsPart := crypto.Keccak256(bridgeExitsHashes...) - importedBridgeExitsPart := crypto.Keccak256(importedBridgeExitsHashes...) - - expectedHash := crypto.Keccak256Hash( - cdkcommon.Uint32ToBytes(1), - cdkcommon.Uint64ToBytes(100), - prevLocalExitRoot[:], - newLocalExitRoot[:], - bridgeExitsPart, - importedBridgeExitsPart, - ) - - // Test the certificate hash - calculatedHash := certificate.Hash() - - require.Equal(t, calculatedHash, expectedHash) -} - -func TestCertificate_HashToSign(t *testing.T) { - t.Parallel() - - c := &Certificate{ - NewLocalExitRoot: common.HexToHash("0xabcd"), - ImportedBridgeExits: []*ImportedBridgeExit{ - { - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 23, - LeafIndex: 1, - }, - }, - { - GlobalIndex: &GlobalIndex{ - MainnetFlag: false, - RollupIndex: 15, - LeafIndex: 2, - }, - }, - }, - } - - globalIndexHashes := make([][]byte, len(c.ImportedBridgeExits)) - for i, importedBridgeExit := range c.ImportedBridgeExits { - globalIndexHashes[i] = importedBridgeExit.GlobalIndex.Hash().Bytes() - } - - expectedHash := crypto.Keccak256Hash( - c.NewLocalExitRoot[:], - crypto.Keccak256Hash(globalIndexHashes...).Bytes(), - ) - - certHash := c.HashToSign() - require.Equal(t, expectedHash, certHash) -} - -func TestClaimFromMainnnet_MarshalJSON(t *testing.T) { - t.Parallel() - - // Test data - merkleProof := &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{ - common.HexToHash("0x2"), - common.HexToHash("0x3"), - }, - } - - l1InfoTreeLeaf := &L1InfoTreeLeaf{ - L1InfoTreeIndex: 42, - RollupExitRoot: 
[common.HashLength]byte{0xaa, 0xbb, 0xcc}, - MainnetExitRoot: [common.HashLength]byte{0xdd, 0xee, 0xff}, - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x1"), - BlockHash: common.HexToHash("0x2"), - Timestamp: 1672531200, // Example timestamp - }, - } - - claim := &ClaimFromMainnnet{ - ProofLeafMER: merkleProof, - ProofGERToL1Root: merkleProof, - L1Leaf: l1InfoTreeLeaf, - } - - // Marshal the ClaimFromMainnnet struct to JSON - expectedJSON, err := claim.MarshalJSON() - require.NoError(t, err) - - var actualClaim ClaimFromMainnnet - err = json.Unmarshal(expectedJSON, &actualClaim) - require.NoError(t, err) -} - -func TestBridgeExit_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - bridgeExit *BridgeExit - expectedOutput string - }{ - { - name: "With TokenInfo", - bridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: createDummyTokenInfo(t), - DestinationNetwork: 100, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(1000), - Metadata: []byte{0x01, 0x02, 0x03}, - }, - expectedOutput: "LeafType: Transfer, DestinationNetwork: 100, DestinationAddress: 0x0000000000000000000000000000000000000002, Amount: 1000, Metadata: 010203, TokenInfo: OriginNetwork: 1, OriginTokenAddress: 0x0000000000000000000000000000000000002345", - }, - { - name: "Without TokenInfo", - bridgeExit: &BridgeExit{ - LeafType: LeafTypeMessage, - DestinationNetwork: 200, - DestinationAddress: common.HexToAddress("0x1"), - Amount: big.NewInt(5000), - Metadata: []byte{0xff, 0xee, 0xdd}, - }, - expectedOutput: "LeafType: Message, DestinationNetwork: 200, DestinationAddress: 0x0000000000000000000000000000000000000001, Amount: 5000, Metadata: ffeedd, TokenInfo: nil", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - actualOutput := tt.bridgeExit.String() - require.Equal(t, tt.expectedOutput, actualOutput) - }) - } -} - -func TestCertificateStatus_UnmarshalJSON(t *testing.T) 
{ - t.Parallel() - - tests := []struct { - name string - input string - expected CertificateStatus - expectError bool - }{ - { - name: "Valid status - Pending", - input: `"Pending"`, - expected: Pending, - expectError: false, - }, - { - name: "Valid status - Proven", - input: `"Proven"`, - expected: Proven, - expectError: false, - }, - { - name: "Valid status - Candidate", - input: `"Candidate"`, - expected: Candidate, - expectError: false, - }, - { - name: "Valid status - InError", - input: `"InError"`, - expected: InError, - expectError: false, - }, - { - name: "Valid status - Settled", - input: `"Settled"`, - expected: Settled, - expectError: false, - }, - { - name: "Invalid status", - input: `"InvalidStatus"`, - expected: 0, // Unchanged (default value of CertificateStatus) - expectError: true, - }, - { - name: "Contains 'InError' string", - input: `"SomeStringWithInError"`, - expected: InError, - expectError: false, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - var status CertificateStatus - err := json.Unmarshal([]byte(tt.input), &status) - - if tt.expectError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expected, status) - } - }) - } -} - -func TestMerkleProof_String(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - proof MerkleProof - expected string - }{ - { - name: "Empty Root and Empty Proof", - proof: MerkleProof{ - Root: common.Hash{}, - Proof: [types.DefaultHeight]common.Hash{}, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", common.Hash{}.String(), [types.DefaultHeight]common.Hash{}), - }, - { - name: "Non-Empty Root and Empty Proof", - proof: MerkleProof{ - Root: common.HexToHash("0xabc123"), - Proof: [types.DefaultHeight]common.Hash{}, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", common.HexToHash("0xabc123").String(), [types.DefaultHeight]common.Hash{}), - }, - { - name: "Non-Empty Root and Partially Populated 
Proof", - proof: MerkleProof{ - Root: common.HexToHash("0xabc123"), - Proof: [types.DefaultHeight]common.Hash{ - common.HexToHash("0xdef456"), - common.HexToHash("0x123789"), - }, - }, - expected: fmt.Sprintf("Root: %s, Proof: %v", - common.HexToHash("0xabc123").String(), - [types.DefaultHeight]common.Hash{ - common.HexToHash("0xdef456"), - common.HexToHash("0x123789"), - }), - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.proof.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestGlobalIndexString(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input GlobalIndex - expected string - }{ - { - name: "All fields zero", - input: GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 0, - }, - expected: "MainnetFlag: false, RollupIndex: 0, LeafIndex: 0", - }, - { - name: "MainnetFlag true, non-zero indices", - input: GlobalIndex{ - MainnetFlag: true, - RollupIndex: 123, - LeafIndex: 456, - }, - expected: "MainnetFlag: true, RollupIndex: 123, LeafIndex: 456", - }, - { - name: "MainnetFlag false, large indices", - input: GlobalIndex{ - MainnetFlag: false, - RollupIndex: 4294967295, // Maximum value of uint32 - LeafIndex: 2147483647, // Large but within uint32 range - }, - expected: "MainnetFlag: false, RollupIndex: 4294967295, LeafIndex: 2147483647", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.input.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestL1InfoTreeLeafString(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - input L1InfoTreeLeaf - expected string - }{ - { - name: "With valid Inner", - input: L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x01"), - MainnetExitRoot: common.HexToHash("0x02"), - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x03"), - BlockHash: 
common.HexToHash("0x04"), - Timestamp: 1234567890, - }, - }, - expected: "L1InfoTreeIndex: 1, RollupExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000001, " + - "MainnetExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000002, " + - "Inner: GlobalExitRoot: 0x0000000000000000000000000000000000000000000000000000000000000003, " + - "BlockHash: 0x0000000000000000000000000000000000000000000000000000000000000004, Timestamp: 1234567890", - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := tt.input.String() - require.Equal(t, tt.expected, result) - }) - } -} - -func TestClaimType(t *testing.T) { - t.Parallel() - - cases := []struct { - name string - claim Claim - expectedType string - }{ - { - name: "Mainnet claim", - claim: &ClaimFromMainnnet{}, - expectedType: "Mainnet", - }, - { - name: "Rollup claim", - claim: &ClaimFromRollup{}, - expectedType: "Rollup", - }, - } - - for _, c := range cases { - c := c - - t.Run(c.name, func(t *testing.T) { - t.Parallel() - - actualType := c.claim.Type() - require.Equal(t, c.expectedType, actualType) - }) - } -} - -func Test_ProblematicBridgeExitHash(t *testing.T) { - bridgeExit := &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{ - OriginNetwork: 0, - OriginTokenAddress: common.HexToAddress("0x0000000000000000000000000000000000000000"), - }, - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0xc949254d682d8c9ad5682521675b8f43b102aec4"), - Amount: new(big.Int).SetUint64(10000000000000000000), - IsMetadataHashed: false, - } - - require.Equal(t, "0x22ed288677b4c2afd83a6d7d55f7df7f4eaaf60f7310210c030fd27adacbc5e0", bridgeExit.Hash().Hex()) -} - -func Test_UnmarshalCertificate(t *testing.T) { - var cert SignedCertificate - err := json.Unmarshal([]byte(fullCertificateJSON), &cert) - require.NoError(t, err) - marshalData, err := json.Marshal(cert) - require.NoError(t, err) - require.JSONEq(t, 
fullCertificateJSON, string(marshalData)) -} - -func Test_UnmarshalImportedBridgeExit(t *testing.T) { - cases := []struct { - name string - importedBridge ImportedBridgeExit - }{ - { - name: "Empty", - importedBridge: ImportedBridgeExit{}, - }, - { - name: "Empty/BridgeExit", - importedBridge: ImportedBridgeExit{ - BridgeExit: &BridgeExit{}, - }, - }, - { - name: "Empty/GlobalIndex", - importedBridge: ImportedBridgeExit{ - GlobalIndex: &GlobalIndex{}, - }, - }, - { - name: "Empty/ClaimFromRollup", - importedBridge: ImportedBridgeExit{ - GlobalIndex: &GlobalIndex{}, - ClaimData: &ClaimFromRollup{}, - }, - }, - { - name: "Empty/ClaimFromMainnnet", - importedBridge: ImportedBridgeExit{ - ClaimData: &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofGERToL1Root: &MerkleProof{}, - L1Leaf: &L1InfoTreeLeaf{}, - }, - }, - }, - - { - name: "Mainnet claim", - importedBridge: ImportedBridgeExit{ - BridgeExit: &BridgeExit{ - LeafType: LeafTypeAsset, - TokenInfo: &TokenInfo{ - OriginNetwork: 0, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("0x1234"), - Amount: big.NewInt(1000), - IsMetadataHashed: false, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - ClaimData: &ClaimFromMainnnet{}, - GlobalIndex: &GlobalIndex{ - MainnetFlag: true, - RollupIndex: 1, - LeafIndex: 2, - }, - }, - }, - } - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - ser, err := json.Marshal(tt.importedBridge) - require.NoError(t, err) - unmarshallBridge := &ImportedBridgeExit{} - err = json.Unmarshal(ser, unmarshallBridge) - require.NoError(t, err) - require.Equal(t, tt.importedBridge, *unmarshallBridge) - }) - } -} - -func Test_UnmarshalMerkleProof(t *testing.T) { - mp := &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: 
[types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - } - ser, err := json.Marshal(mp) - require.NoError(t, err) - unmarshallMp := &MerkleProof{} - err = json.Unmarshal(ser, unmarshallMp) - require.NoError(t, err) - require.Equal(t, mp, unmarshallMp) -} - -func Test_UnmarshalL1InfoTreeLeaf(t *testing.T) { - data := L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: [common.HashLength]byte{0xaa, 0xbb, 0xcc}, - MainnetExitRoot: [common.HashLength]byte{0xdd, 0xee, 0xff}, - Inner: &L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x1"), - BlockHash: common.HexToHash("0x2"), - Timestamp: 1672531200, // Example timestamp - }, - } - ser, err := json.Marshal(data) - require.NoError(t, err) - unmarshalled := &L1InfoTreeLeaf{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, data, *unmarshalled) -} - -func Test_UnmarshalBridgeExit(t *testing.T) { - cases := []struct { - name string - data *BridgeExit - }{ - { - name: "metadataHashed", - data: &BridgeExit{ - LeafType: LeafTypeAsset, - IsMetadataHashed: true, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - }, - { - name: "metadata no hashed", - data: &BridgeExit{ - LeafType: LeafTypeAsset, - IsMetadataHashed: false, - Metadata: []byte{0x01, 0x02, 0x03}, - }, - }, - } - - for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - ser, err := json.Marshal(tt.data) - require.NoError(t, err) - unmarshalled := &BridgeExit{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, tt.data, unmarshalled) - }) - } -} - -func Test_UnmarshalClaimFromMainnnet(t *testing.T) { - claim := &ClaimFromMainnnet{ - ProofLeafMER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofGERToL1Root: &MerkleProof{}, - L1Leaf: &L1InfoTreeLeaf{}, - } - ser, err := json.Marshal(claim) - require.NoError(t, err) - unmarshalled := 
&ClaimFromMainnnet{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, claim, unmarshalled) -} - -func Test_UnmarshalClaimFromRollup(t *testing.T) { - claim := &ClaimFromRollup{ - ProofLeafLER: &MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [types.DefaultHeight]common.Hash{common.HexToHash("0x2"), common.HexToHash("0x3")}, - }, - ProofLERToRER: &MerkleProof{ - Root: common.HexToHash("0x4"), - }, - ProofGERToL1Root: &MerkleProof{ - Root: common.HexToHash("0x5"), - }, - - L1Leaf: &L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - }, - } - ser, err := json.Marshal(claim) - require.NoError(t, err) - unmarshalled := &ClaimFromRollup{} - err = json.Unmarshal(ser, unmarshalled) - require.NoError(t, err) - require.Equal(t, claim, unmarshalled) -} diff --git a/aggoracle/chaingersender/evm.go b/aggoracle/chaingersender/evm.go deleted file mode 100644 index e099f3df2..000000000 --- a/aggoracle/chaingersender/evm.go +++ /dev/null @@ -1,154 +0,0 @@ -package chaingersender - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/globalexitrootmanagerl2sovereignchain" - cdkcommon "github.com/0xPolygon/cdk/common" - cfgtypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" - ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -const insertGERFuncName = "insertGlobalExitRoot" - -type EthClienter interface { - ethereum.LogFilterer - ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend -} - -type EthTxManager interface { - Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, - statuses 
[]ethtxtypes.MonitoredTxStatus, - ) ([]ethtxtypes.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) - Add(ctx context.Context, - to *common.Address, - value *big.Int, - data []byte, - gasOffset uint64, - sidecar *types.BlobTxSidecar, - ) (common.Hash, error) -} - -type L2GERManager interface { - GlobalExitRootMap(opts *bind.CallOpts, ger [common.HashLength]byte) (*big.Int, error) -} - -type EVMConfig struct { - GlobalExitRootL2Addr common.Address `mapstructure:"GlobalExitRootL2"` - URLRPCL2 string `mapstructure:"URLRPCL2"` - GasOffset uint64 `mapstructure:"GasOffset"` - WaitPeriodMonitorTx cfgtypes.Duration `mapstructure:"WaitPeriodMonitorTx"` - EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"` -} - -type EVMChainGERSender struct { - logger *log.Logger - - l2GERManager L2GERManager - l2GERManagerAddr common.Address - l2GERManagerAbi *abi.ABI - - ethTxMan EthTxManager - gasOffset uint64 - waitPeriodMonitorTx time.Duration -} - -func NewEVMChainGERSender( - logger *log.Logger, - l2GERManagerAddr common.Address, - l2Client EthClienter, - ethTxMan EthTxManager, - gasOffset uint64, - waitPeriodMonitorTx time.Duration, -) (*EVMChainGERSender, error) { - l2GERManager, err := globalexitrootmanagerl2sovereignchain.NewGlobalexitrootmanagerl2sovereignchain( - l2GERManagerAddr, l2Client) - if err != nil { - return nil, err - } - - l2GERAbi, err := globalexitrootmanagerl2sovereignchain.Globalexitrootmanagerl2sovereignchainMetaData.GetAbi() - if err != nil { - return nil, err - } - - return &EVMChainGERSender{ - logger: logger, - l2GERManager: l2GERManager, - l2GERManagerAddr: l2GERManagerAddr, - l2GERManagerAbi: l2GERAbi, - ethTxMan: ethTxMan, - gasOffset: gasOffset, - waitPeriodMonitorTx: waitPeriodMonitorTx, - }, nil -} - -func (c *EVMChainGERSender) IsGERInjected(ger common.Hash) (bool, error) { - blockHashBigInt, err := c.l2GERManager.GlobalExitRootMap(&bind.CallOpts{Pending: false}, ger) - if err != 
nil { - return false, fmt.Errorf("failed to check if global exit root is injected %s: %w", ger, err) - } - - return common.BigToHash(blockHashBigInt) != cdkcommon.ZeroHash, nil -} - -func (c *EVMChainGERSender) InjectGER(ctx context.Context, ger common.Hash) error { - ticker := time.NewTicker(c.waitPeriodMonitorTx) - defer ticker.Stop() - - updateGERTxInput, err := c.l2GERManagerAbi.Pack(insertGERFuncName, ger) - if err != nil { - return err - } - - id, err := c.ethTxMan.Add(ctx, &c.l2GERManagerAddr, common.Big0, updateGERTxInput, c.gasOffset, nil) - if err != nil { - return err - } - - for { - select { - case <-ctx.Done(): - c.logger.Infof("context cancelled") - return nil - - case <-ticker.C: - c.logger.Debugf("waiting for tx %s to be mined", id.Hex()) - res, err := c.ethTxMan.Result(ctx, id) - if err != nil { - c.logger.Errorf("failed to check the transaction %s status: %s", id.Hex(), err) - return err - } - - switch res.Status { - case ethtxtypes.MonitoredTxStatusCreated, - ethtxtypes.MonitoredTxStatusSent: - continue - case ethtxtypes.MonitoredTxStatusFailed: - return fmt.Errorf("inject GER tx %s failed", id.Hex()) - case ethtxtypes.MonitoredTxStatusMined, - ethtxtypes.MonitoredTxStatusSafe, - ethtxtypes.MonitoredTxStatusFinalized: - c.logger.Debugf("inject GER tx %s was successfully mined at block %d", id.Hex(), res.MinedAtBlockNumber) - - return nil - default: - c.logger.Error("unexpected tx status:", res.Status) - } - } - } -} diff --git a/aggoracle/chaingersender/evm_test.go b/aggoracle/chaingersender/evm_test.go deleted file mode 100644 index ad1cb5af0..000000000 --- a/aggoracle/chaingersender/evm_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package chaingersender - -import ( - "context" - "errors" - "math/big" - "strings" - "testing" - "time" - - "github.com/0xPolygon/cdk/aggoracle/mocks" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/zkevm-ethtx-manager/types" - "github.com/ethereum/go-ethereum/accounts/abi" - 
"github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestEVMChainGERSender_InjectGER(t *testing.T) { - insertGERFuncABI := `[{ - "inputs": [ - { - "internalType": "bytes32", - "name": "_newRoot", - "type": "bytes32" - } - ], - "name": "insertGlobalExitRoot", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }]` - l2GERManagerAddr := common.HexToAddress("0x123") - l2GERManagerAbi, err := abi.JSON(strings.NewReader(insertGERFuncABI)) - require.NoError(t, err) - - ger := common.HexToHash("0x456") - txID := common.HexToHash("0x789") - - tests := []struct { - name string - addReturnTxID common.Hash - addReturnErr error - resultReturn types.MonitoredTxResult - resultReturnErr error - expectedErr string - }{ - { - name: "successful injection", - addReturnTxID: txID, - addReturnErr: nil, - resultReturn: types.MonitoredTxResult{Status: types.MonitoredTxStatusMined, MinedAtBlockNumber: big.NewInt(123)}, - resultReturnErr: nil, - expectedErr: "", - }, - { - name: "injection fails due to transaction failure", - addReturnTxID: txID, - addReturnErr: nil, - resultReturn: types.MonitoredTxResult{Status: types.MonitoredTxStatusFailed}, - resultReturnErr: nil, - expectedErr: "inject GER tx", - }, - { - name: "injection fails due to Add method error", - addReturnTxID: common.Hash{}, - addReturnErr: errors.New("add error"), - resultReturn: types.MonitoredTxResult{}, - resultReturnErr: nil, - expectedErr: "add error", - }, - { - name: "injection fails due to Result method error", - addReturnTxID: txID, - addReturnErr: nil, - resultReturn: types.MonitoredTxResult{}, - resultReturnErr: errors.New("result error"), - expectedErr: "result error", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx, cancelFn := context.WithTimeout(context.Background(), time.Millisecond*500) - defer cancelFn() - - ethTxMan := new(mocks.EthTxManagerMock) - ethTxMan. 
- On("Add", ctx, &l2GERManagerAddr, common.Big0, mock.Anything, mock.Anything, mock.Anything). - Return(tt.addReturnTxID, tt.addReturnErr) - ethTxMan. - On("Result", ctx, tt.addReturnTxID). - Return(tt.resultReturn, tt.resultReturnErr) - - sender := &EVMChainGERSender{ - logger: log.GetDefaultLogger(), - l2GERManagerAddr: l2GERManagerAddr, - l2GERManagerAbi: &l2GERManagerAbi, - ethTxMan: ethTxMan, - waitPeriodMonitorTx: time.Millisecond * 10, - } - - err := sender.InjectGER(ctx, ger) - if tt.expectedErr == "" { - require.NoError(t, err) - } else { - require.Error(t, err) - require.Contains(t, err.Error(), tt.expectedErr) - } - }) - } -} - -func TestEVMChainGERSender_IsGERInjected(t *testing.T) { - tests := []struct { - name string - mockReturn *big.Int - mockError error - expectedResult bool - expectedErrMsg string - }{ - { - name: "GER is injected", - mockReturn: big.NewInt(1), - mockError: nil, - expectedResult: true, - expectedErrMsg: "", - }, - { - name: "GER is not injected", - mockReturn: big.NewInt(0), - mockError: nil, - expectedResult: false, - expectedErrMsg: "", - }, - { - name: "Error checking GER injection", - mockReturn: nil, - mockError: errors.New("some error"), - expectedResult: false, - expectedErrMsg: "failed to check if global exit root is injected", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockL2GERManager := new(mocks.L2GERManagerMock) - mockL2GERManager.On("GlobalExitRootMap", mock.Anything, mock.Anything). 
- Return(tt.mockReturn, tt.mockError) - - evmChainGERSender := &EVMChainGERSender{ - l2GERManager: mockL2GERManager, - } - - ger := common.HexToHash("0x12345") - result, err := evmChainGERSender.IsGERInjected(ger) - if tt.expectedErrMsg != "" { - require.ErrorContains(t, err, tt.expectedErrMsg) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.expectedResult, result) - - mockL2GERManager.AssertExpectations(t) - }) - } -} diff --git a/aggoracle/config.go b/aggoracle/config.go deleted file mode 100644 index 8559ddb61..000000000 --- a/aggoracle/config.go +++ /dev/null @@ -1,25 +0,0 @@ -package aggoracle - -import ( - "github.com/0xPolygon/cdk/aggoracle/chaingersender" - "github.com/0xPolygon/cdk/config/types" -) - -type TargetChainType string - -const ( - EVMChain TargetChainType = "EVM" -) - -var ( - SupportedChainTypes = []TargetChainType{EVMChain} -) - -type Config struct { - TargetChainType TargetChainType `mapstructure:"TargetChainType"` - URLRPCL1 string `mapstructure:"URLRPCL1"` - // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - WaitPeriodNextGER types.Duration `mapstructure:"WaitPeriodNextGER"` - EVMSender chaingersender.EVMConfig `mapstructure:"EVMSender"` -} diff --git a/aggoracle/e2e_test.go b/aggoracle/e2e_test.go deleted file mode 100644 index 648791ebb..000000000 --- a/aggoracle/e2e_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package aggoracle_test - -import ( - "fmt" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestEVM(t *testing.T) { - setup := helpers.NewE2EEnvWithEVML2(t) - - for i := 0; i < 10; i++ { - _, err := 
setup.L1Environment.GERContract.UpdateExitRoot(setup.L1Environment.Auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - setup.L1Environment.SimBackend.Commit() - - // wait for the GER to be processed by the InfoTree syncer - time.Sleep(time.Millisecond * 100) - expectedGER, err := setup.L1Environment.GERContract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - - isInjected, err := setup.L2Environment.AggoracleSender.IsGERInjected(expectedGER) - require.NoError(t, err) - - require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) - } -} diff --git a/aggoracle/mocks/mock_ethtxmanager.go b/aggoracle/mocks/mock_ethtxmanager.go deleted file mode 100644 index b011f5682..000000000 --- a/aggoracle/mocks/mock_ethtxmanager.go +++ /dev/null @@ -1,270 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" - - types "github.com/ethereum/go-ethereum/core/types" - - zkevm_ethtx_managertypes "github.com/0xPolygon/zkevm-ethtx-manager/types" -) - -// EthTxManagerMock is an autogenerated mock type for the EthTxManager type -type EthTxManagerMock struct { - mock.Mock -} - -type EthTxManagerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *EthTxManagerMock) EXPECT() *EthTxManagerMock_Expecter { - return &EthTxManagerMock_Expecter{mock: &_m.Mock} -} - -// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar -func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { - ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) - - if len(ret) == 0 { - panic("no return value specified for Add") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
*common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { - return rf(ctx, to, value, data, gasOffset, sidecar) - } - if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { - r0 = rf(ctx, to, value, data, gasOffset, sidecar) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { - r1 = rf(ctx, to, value, data, gasOffset, sidecar) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthTxManagerMock_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' -type EthTxManagerMock_Add_Call struct { - *mock.Call -} - -// Add is a helper method to define mock.On call -// - ctx context.Context -// - to *common.Address -// - value *big.Int -// - data []byte -// - gasOffset uint64 -// - sidecar *types.BlobTxSidecar -func (_e *EthTxManagerMock_Expecter) Add(ctx interface{}, to interface{}, value interface{}, data interface{}, gasOffset interface{}, sidecar interface{}) *EthTxManagerMock_Add_Call { - return &EthTxManagerMock_Add_Call{Call: _e.mock.On("Add", ctx, to, value, data, gasOffset, sidecar)} -} - -func (_c *EthTxManagerMock_Add_Call) Run(run func(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar)) *EthTxManagerMock_Add_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*common.Address), args[2].(*big.Int), args[3].([]byte), args[4].(uint64), args[5].(*types.BlobTxSidecar)) - }) - return _c -} - -func (_c *EthTxManagerMock_Add_Call) Return(_a0 common.Hash, _a1 error) *EthTxManagerMock_Add_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthTxManagerMock_Add_Call) RunAndReturn(run func(context.Context, *common.Address, *big.Int, []byte, uint64, 
*types.BlobTxSidecar) (common.Hash, error)) *EthTxManagerMock_Add_Call { - _c.Call.Return(run) - return _c -} - -// Remove provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { - ret := _m.Called(ctx, id) - - if len(ret) == 0 { - panic("no return value specified for Remove") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { - r0 = rf(ctx, id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// EthTxManagerMock_Remove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Remove' -type EthTxManagerMock_Remove_Call struct { - *mock.Call -} - -// Remove is a helper method to define mock.On call -// - ctx context.Context -// - id common.Hash -func (_e *EthTxManagerMock_Expecter) Remove(ctx interface{}, id interface{}) *EthTxManagerMock_Remove_Call { - return &EthTxManagerMock_Remove_Call{Call: _e.mock.On("Remove", ctx, id)} -} - -func (_c *EthTxManagerMock_Remove_Call) Run(run func(ctx context.Context, id common.Hash)) *EthTxManagerMock_Remove_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthTxManagerMock_Remove_Call) Return(_a0 error) *EthTxManagerMock_Remove_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EthTxManagerMock_Remove_Call) RunAndReturn(run func(context.Context, common.Hash) error) *EthTxManagerMock_Remove_Call { - _c.Call.Return(run) - return _c -} - -// Result provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { - ret := _m.Called(ctx, id) - - if len(ret) == 0 { - panic("no return value specified for Result") - } - - var r0 zkevm_ethtx_managertypes.MonitoredTxResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) 
(zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { - return rf(ctx, id) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { - r0 = rf(ctx, id) - } else { - r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthTxManagerMock_Result_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Result' -type EthTxManagerMock_Result_Call struct { - *mock.Call -} - -// Result is a helper method to define mock.On call -// - ctx context.Context -// - id common.Hash -func (_e *EthTxManagerMock_Expecter) Result(ctx interface{}, id interface{}) *EthTxManagerMock_Result_Call { - return &EthTxManagerMock_Result_Call{Call: _e.mock.On("Result", ctx, id)} -} - -func (_c *EthTxManagerMock_Result_Call) Run(run func(ctx context.Context, id common.Hash)) *EthTxManagerMock_Result_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthTxManagerMock_Result_Call) Return(_a0 zkevm_ethtx_managertypes.MonitoredTxResult, _a1 error) *EthTxManagerMock_Result_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthTxManagerMock_Result_Call) RunAndReturn(run func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)) *EthTxManagerMock_Result_Call { - _c.Call.Return(run) - return _c -} - -// ResultsByStatus provides a mock function with given fields: ctx, statuses -func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { - ret := _m.Called(ctx, statuses) - - if len(ret) == 0 { - panic("no return value specified for ResultsByStatus") - } - - var r0 
[]zkevm_ethtx_managertypes.MonitoredTxResult - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { - return rf(ctx, statuses) - } - if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { - r0 = rf(ctx, statuses) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { - r1 = rf(ctx, statuses) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthTxManagerMock_ResultsByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResultsByStatus' -type EthTxManagerMock_ResultsByStatus_Call struct { - *mock.Call -} - -// ResultsByStatus is a helper method to define mock.On call -// - ctx context.Context -// - statuses []zkevm_ethtx_managertypes.MonitoredTxStatus -func (_e *EthTxManagerMock_Expecter) ResultsByStatus(ctx interface{}, statuses interface{}) *EthTxManagerMock_ResultsByStatus_Call { - return &EthTxManagerMock_ResultsByStatus_Call{Call: _e.mock.On("ResultsByStatus", ctx, statuses)} -} - -func (_c *EthTxManagerMock_ResultsByStatus_Call) Run(run func(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus)) *EthTxManagerMock_ResultsByStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].([]zkevm_ethtx_managertypes.MonitoredTxStatus)) - }) - return _c -} - -func (_c *EthTxManagerMock_ResultsByStatus_Call) Return(_a0 []zkevm_ethtx_managertypes.MonitoredTxResult, _a1 error) *EthTxManagerMock_ResultsByStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthTxManagerMock_ResultsByStatus_Call) RunAndReturn(run func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) 
([]zkevm_ethtx_managertypes.MonitoredTxResult, error)) *EthTxManagerMock_ResultsByStatus_Call { - _c.Call.Return(run) - return _c -} - -// NewEthTxManagerMock creates a new instance of EthTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthTxManagerMock(t interface { - mock.TestingT - Cleanup(func()) -}) *EthTxManagerMock { - mock := &EthTxManagerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggoracle/mocks/mock_l2germanager.go b/aggoracle/mocks/mock_l2germanager.go deleted file mode 100644 index a7ec0296f..000000000 --- a/aggoracle/mocks/mock_l2germanager.go +++ /dev/null @@ -1,97 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - big "math/big" - - bind "github.com/ethereum/go-ethereum/accounts/abi/bind" - - mock "github.com/stretchr/testify/mock" -) - -// L2GERManagerMock is an autogenerated mock type for the L2GERManager type -type L2GERManagerMock struct { - mock.Mock -} - -type L2GERManagerMock_Expecter struct { - mock *mock.Mock -} - -func (_m *L2GERManagerMock) EXPECT() *L2GERManagerMock_Expecter { - return &L2GERManagerMock_Expecter{mock: &_m.Mock} -} - -// GlobalExitRootMap provides a mock function with given fields: opts, ger -func (_m *L2GERManagerMock) GlobalExitRootMap(opts *bind.CallOpts, ger [32]byte) (*big.Int, error) { - ret := _m.Called(opts, ger) - - if len(ret) == 0 { - panic("no return value specified for GlobalExitRootMap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(*bind.CallOpts, [32]byte) (*big.Int, error)); ok { - return rf(opts, ger) - } - if rf, ok := ret.Get(0).(func(*bind.CallOpts, [32]byte) *big.Int); ok { - r0 = rf(opts, ger) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(*bind.CallOpts, [32]byte) error); ok { - r1 
= rf(opts, ger) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2GERManagerMock_GlobalExitRootMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GlobalExitRootMap' -type L2GERManagerMock_GlobalExitRootMap_Call struct { - *mock.Call -} - -// GlobalExitRootMap is a helper method to define mock.On call -// - opts *bind.CallOpts -// - ger [32]byte -func (_e *L2GERManagerMock_Expecter) GlobalExitRootMap(opts interface{}, ger interface{}) *L2GERManagerMock_GlobalExitRootMap_Call { - return &L2GERManagerMock_GlobalExitRootMap_Call{Call: _e.mock.On("GlobalExitRootMap", opts, ger)} -} - -func (_c *L2GERManagerMock_GlobalExitRootMap_Call) Run(run func(opts *bind.CallOpts, ger [32]byte)) *L2GERManagerMock_GlobalExitRootMap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*bind.CallOpts), args[1].([32]byte)) - }) - return _c -} - -func (_c *L2GERManagerMock_GlobalExitRootMap_Call) Return(_a0 *big.Int, _a1 error) *L2GERManagerMock_GlobalExitRootMap_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2GERManagerMock_GlobalExitRootMap_Call) RunAndReturn(run func(*bind.CallOpts, [32]byte) (*big.Int, error)) *L2GERManagerMock_GlobalExitRootMap_Call { - _c.Call.Return(run) - return _c -} - -// NewL2GERManagerMock creates a new instance of L2GERManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewL2GERManagerMock(t interface { - mock.TestingT - Cleanup(func()) -}) *L2GERManagerMock { - mock := &L2GERManagerMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggoracle/oracle.go b/aggoracle/oracle.go deleted file mode 100644 index 2bdc469f8..000000000 --- a/aggoracle/oracle.go +++ /dev/null @@ -1,138 +0,0 @@ -package aggoracle - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" -) - -type L1InfoTreer interface { - GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) -} - -type ChainSender interface { - IsGERInjected(ger common.Hash) (bool, error) - InjectGER(ctx context.Context, ger common.Hash) error -} - -type AggOracle struct { - logger *log.Logger - waitPeriodNextGER time.Duration - l1Client ethereum.ChainReader - l1Info L1InfoTreer - chainSender ChainSender - blockFinality *big.Int -} - -func New( - logger *log.Logger, - chainSender ChainSender, - l1Client ethereum.ChainReader, - l1InfoTreeSyncer L1InfoTreer, - blockFinalityType etherman.BlockNumberFinality, - waitPeriodNextGER time.Duration, -) (*AggOracle, error) { - finality, err := blockFinalityType.ToBlockNum() - if err != nil { - return nil, err - } - - return &AggOracle{ - logger: logger, - chainSender: chainSender, - l1Client: l1Client, - l1Info: l1InfoTreeSyncer, - blockFinality: finality, - waitPeriodNextGER: waitPeriodNextGER, - }, nil -} - -func (a *AggOracle) Start(ctx context.Context) { - ticker := time.NewTicker(a.waitPeriodNextGER) - defer ticker.Stop() - - var blockNumToFetch uint64 - - for { - select { - case <-ticker.C: - if err := a.processLatestGER(ctx, &blockNumToFetch); err != nil { - a.handleGERProcessingError(err, blockNumToFetch) - } - - case <-ctx.Done(): - 
return - } - } -} - -// processLatestGER fetches the latest finalized GER, checks if it is already injected and injects it if not -func (a *AggOracle) processLatestGER(ctx context.Context, blockNumToFetch *uint64) error { - // Fetch the latest GER - blockNum, gerToInject, err := a.getLastFinalizedGER(ctx, *blockNumToFetch) - if err != nil { - return err - } - - // Update the block number for the next iteration - *blockNumToFetch = blockNum - - isGERInjected, err := a.chainSender.IsGERInjected(gerToInject) - if err != nil { - return fmt.Errorf("error checking if GER is already injected: %w", err) - } - - if isGERInjected { - a.logger.Debugf("GER %s is already injected", gerToInject.Hex()) - return nil - } - - a.logger.Infof("injecting new GER: %s", gerToInject.Hex()) - if err := a.chainSender.InjectGER(ctx, gerToInject); err != nil { - return fmt.Errorf("error injecting GER %s: %w", gerToInject.Hex(), err) - } - - a.logger.Infof("GER %s is injected successfully", gerToInject.Hex()) - return nil -} - -// handleGERProcessingError handles global exit root processing error -func (a *AggOracle) handleGERProcessingError(err error, blockNumToFetch uint64) { - switch { - case errors.Is(err, l1infotreesync.ErrBlockNotProcessed): - a.logger.Debugf("syncer is not ready for the block %d", blockNumToFetch) - case errors.Is(err, l1infotreesync.ErrNotFound): - a.logger.Debugf("syncer has not found any GER until block %d", blockNumToFetch) - default: - a.logger.Error("unexpected error processing GER: ", err) - } -} - -// getLastFinalizedGER tries to return a finalised GER: -// If targetBlockNum != 0: it will try to fetch it until the given block -// Else it will ask the L1 client for the latest finalised block and use that. 
-// If it fails to get the GER from the syncer, it will return the block number that used to query -func (a *AggOracle) getLastFinalizedGER(ctx context.Context, targetBlockNum uint64) (uint64, common.Hash, error) { - if targetBlockNum == 0 { - header, err := a.l1Client.HeaderByNumber(ctx, a.blockFinality) - if err != nil { - return 0, common.Hash{}, err - } - targetBlockNum = header.Number.Uint64() - } - - info, err := a.l1Info.GetLatestInfoUntilBlock(ctx, targetBlockNum) - if err != nil { - return targetBlockNum, common.Hash{}, err - } - - return 0, info.GlobalExitRoot, nil -} diff --git a/aggregator/aggregator.go b/aggregator/aggregator.go index 5b7327aa9..832aa2487 100644 --- a/aggregator/aggregator.go +++ b/aggregator/aggregator.go @@ -22,8 +22,6 @@ import ( "github.com/0xPolygon/cdk/aggregator/prover" cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/l1infotree" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/rpc" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" @@ -33,6 +31,8 @@ import ( "github.com/0xPolygonHermez/zkevm-synchronizer-l1/state/entities" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer/l1_check_block" + "github.com/agglayer/aggkit/l1infotree" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" "go.uber.org/zap/zapcore" "google.golang.org/grpc" diff --git a/aggregator/aggregator_test.go b/aggregator/aggregator_test.go index 086550ebf..6e012fb63 100644 --- a/aggregator/aggregator_test.go +++ b/aggregator/aggregator_test.go @@ -21,10 +21,10 @@ import ( mocks "github.com/0xPolygon/cdk/aggregator/mocks" "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" rpctypes "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" 
"github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/assert" diff --git a/aggregator/config.go b/aggregator/config.go index e17d68af4..477f50882 100644 --- a/aggregator/config.go +++ b/aggregator/config.go @@ -5,9 +5,9 @@ import ( "math/big" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" syncronizerConfig "github.com/0xPolygonHermez/zkevm-synchronizer-l1/config" + "github.com/agglayer/aggkit/log" ) // SettlementBackend is the type of the settlement backend diff --git a/aggregator/db/dbstorage/dbstorage.go b/aggregator/db/dbstorage/dbstorage.go index b20a1c715..05af9aa76 100644 --- a/aggregator/db/dbstorage/dbstorage.go +++ b/aggregator/db/dbstorage/dbstorage.go @@ -4,7 +4,7 @@ import ( "context" "database/sql" - "github.com/0xPolygon/cdk/db" + "github.com/agglayer/aggkit/db" ) // DBStorage implements the Storage interface diff --git a/aggregator/db/dbstorage/proof.go b/aggregator/db/dbstorage/proof.go index d3065c7e1..46a6def79 100644 --- a/aggregator/db/dbstorage/proof.go +++ b/aggregator/db/dbstorage/proof.go @@ -7,8 +7,8 @@ import ( "fmt" "time" - "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/state" + "github.com/agglayer/aggkit/db" ) // CheckProofExistsForBatch checks if the batch is already included in any proof @@ -170,7 +170,7 @@ func (d *DBStorage) GetProofsToAggregate(ctx context.Context, dbTx db.Txer) (*st &createdAt1, &updatedAt1, &proof2.BatchNumber, &proof2.BatchNumberFinal, &proof2.Proof, &proof2.ProofID, &proof2.InputProver, &proof2.Prover, &proof2.ProverID, &generatingSince2, - &createdAt1, &updatedAt1, + &createdAt2, &updatedAt2, ) if generatingSince1 != nil { diff --git a/aggregator/db/dbstorage/sequence.go b/aggregator/db/dbstorage/sequence.go index 960632010..ed509e64f 100644 --- 
a/aggregator/db/dbstorage/sequence.go +++ b/aggregator/db/dbstorage/sequence.go @@ -3,8 +3,8 @@ package dbstorage import ( "context" - "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/state" + "github.com/agglayer/aggkit/db" ) // AddSequence stores the sequence information to allow the aggregator verify sequences. diff --git a/aggregator/db/logger.go b/aggregator/db/logger.go index e60a7b011..441a54fc0 100644 --- a/aggregator/db/logger.go +++ b/aggregator/db/logger.go @@ -4,7 +4,7 @@ import ( "context" "fmt" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/jackc/pgx/v4" ) diff --git a/aggregator/db/migrations.go b/aggregator/db/migrations.go index 221fb1450..695eb0c99 100644 --- a/aggregator/db/migrations.go +++ b/aggregator/db/migrations.go @@ -4,8 +4,8 @@ import ( "embed" "fmt" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/db" + "github.com/agglayer/aggkit/log" migrate "github.com/rubenv/sql-migrate" ) diff --git a/aggregator/interfaces.go b/aggregator/interfaces.go index 5979272d9..3a868d56c 100644 --- a/aggregator/interfaces.go +++ b/aggregator/interfaces.go @@ -7,11 +7,11 @@ import ( ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" "github.com/0xPolygon/cdk/aggregator/prover" - "github.com/0xPolygon/cdk/db" "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/db" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto/kzg4844" diff --git a/aggregator/mocks/mock_eth_tx_manager.go b/aggregator/mocks/mock_eth_tx_manager_client.go similarity index 99% rename from aggregator/mocks/mock_eth_tx_manager.go rename to aggregator/mocks/mock_eth_tx_manager_client.go index 47dabe1ca..339e6b893 100644 --- 
a/aggregator/mocks/mock_eth_tx_manager.go +++ b/aggregator/mocks/mock_eth_tx_manager_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/aggregator/mocks/mock_etherman.go b/aggregator/mocks/mock_etherman.go index 2982430df..8dab429a4 100644 --- a/aggregator/mocks/mock_etherman.go +++ b/aggregator/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/aggregator/mocks/mock_prover.go b/aggregator/mocks/mock_prover_interface.go similarity index 99% rename from aggregator/mocks/mock_prover.go rename to aggregator/mocks/mock_prover_interface.go index 7939667d1..f48d4ff07 100644 --- a/aggregator/mocks/mock_prover.go +++ b/aggregator/mocks/mock_prover_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/aggregator/mocks/mock_rpc.go b/aggregator/mocks/mock_rpc_interface.go similarity index 98% rename from aggregator/mocks/mock_rpc.go rename to aggregator/mocks/mock_rpc_interface.go index 3927b5db0..6e3871b3b 100644 --- a/aggregator/mocks/mock_rpc.go +++ b/aggregator/mocks/mock_rpc_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/aggregator/mocks/mock_storage.go b/aggregator/mocks/mock_storage_interface.go similarity index 99% rename from aggregator/mocks/mock_storage.go rename to aggregator/mocks/mock_storage_interface.go index 2615f6bca..42aaf118a 100644 --- a/aggregator/mocks/mock_storage.go +++ b/aggregator/mocks/mock_storage_interface.go @@ -1,11 +1,11 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks import ( context "context" - db "github.com/0xPolygon/cdk/db" + db "github.com/agglayer/aggkit/db" mock "github.com/stretchr/testify/mock" sql "database/sql" diff --git a/aggregator/mocks/mock_synchronizer.go b/aggregator/mocks/mock_synchronizer.go index ae7735b17..e141178af 100644 --- a/aggregator/mocks/mock_synchronizer.go +++ b/aggregator/mocks/mock_synchronizer.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.50.2. DO NOT EDIT. package mocks diff --git a/aggregator/prover/mocks/mock_channel.go b/aggregator/prover/mocks/mock_aggregator_service_channel_server.go similarity index 92% rename from aggregator/prover/mocks/mock_channel.go rename to aggregator/prover/mocks/mock_aggregator_service_channel_server.go index 9ed3c47b9..f65c2f694 100644 --- a/aggregator/prover/mocks/mock_channel.go +++ b/aggregator/prover/mocks/mock_aggregator_service_channel_server.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks @@ -129,7 +129,7 @@ func (_c *ChannelMock_Recv_Call) RunAndReturn(run func() (*prover.ProverMessage, } // RecvMsg provides a mock function with given fields: m -func (_m *ChannelMock) RecvMsg(m interface{}) error { +func (_m *ChannelMock) RecvMsg(m any) error { ret := _m.Called(m) if len(ret) == 0 { @@ -137,7 +137,7 @@ func (_m *ChannelMock) RecvMsg(m interface{}) error { } var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { + if rf, ok := ret.Get(0).(func(any) error); ok { r0 = rf(m) } else { r0 = ret.Error(0) @@ -152,14 +152,14 @@ type ChannelMock_RecvMsg_Call struct { } // RecvMsg is a helper method to define mock.On call -// - m interface{} +// - m any func (_e *ChannelMock_Expecter) RecvMsg(m interface{}) *ChannelMock_RecvMsg_Call { return &ChannelMock_RecvMsg_Call{Call: _e.mock.On("RecvMsg", m)} } -func (_c *ChannelMock_RecvMsg_Call) Run(run func(m interface{})) *ChannelMock_RecvMsg_Call { +func (_c *ChannelMock_RecvMsg_Call) Run(run func(m any)) *ChannelMock_RecvMsg_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(interface{})) + run(args[0].(any)) }) return _c } @@ -169,7 +169,7 @@ func (_c *ChannelMock_RecvMsg_Call) Return(_a0 error) *ChannelMock_RecvMsg_Call return _c } -func (_c *ChannelMock_RecvMsg_Call) RunAndReturn(run func(interface{}) error) *ChannelMock_RecvMsg_Call { +func (_c *ChannelMock_RecvMsg_Call) RunAndReturn(run func(any) error) *ChannelMock_RecvMsg_Call { _c.Call.Return(run) return _c } @@ -267,7 +267,7 @@ func (_c *ChannelMock_SendHeader_Call) RunAndReturn(run func(metadata.MD) error) } // SendMsg provides a mock function with given fields: m -func (_m *ChannelMock) SendMsg(m interface{}) error { +func (_m *ChannelMock) SendMsg(m any) error { ret := _m.Called(m) if len(ret) == 0 { @@ -275,7 +275,7 @@ func (_m *ChannelMock) SendMsg(m interface{}) error { } var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { + if rf, ok := ret.Get(0).(func(any) error); ok { r0 = rf(m) } else 
{ r0 = ret.Error(0) @@ -290,14 +290,14 @@ type ChannelMock_SendMsg_Call struct { } // SendMsg is a helper method to define mock.On call -// - m interface{} +// - m any func (_e *ChannelMock_Expecter) SendMsg(m interface{}) *ChannelMock_SendMsg_Call { return &ChannelMock_SendMsg_Call{Call: _e.mock.On("SendMsg", m)} } -func (_c *ChannelMock_SendMsg_Call) Run(run func(m interface{})) *ChannelMock_SendMsg_Call { +func (_c *ChannelMock_SendMsg_Call) Run(run func(m any)) *ChannelMock_SendMsg_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(interface{})) + run(args[0].(any)) }) return _c } @@ -307,7 +307,7 @@ func (_c *ChannelMock_SendMsg_Call) Return(_a0 error) *ChannelMock_SendMsg_Call return _c } -func (_c *ChannelMock_SendMsg_Call) RunAndReturn(run func(interface{}) error) *ChannelMock_SendMsg_Call { +func (_c *ChannelMock_SendMsg_Call) RunAndReturn(run func(any) error) *ChannelMock_SendMsg_Call { _c.Call.Return(run) return _c } diff --git a/aggregator/prover/prover.go b/aggregator/prover/prover.go index ad9c9895f..4b86e6b71 100644 --- a/aggregator/prover/prover.go +++ b/aggregator/prover/prover.go @@ -12,7 +12,7 @@ import ( "time" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/poseidon" ) diff --git a/aggregator/prover/prover_test.go b/aggregator/prover/prover_test.go index 438718a33..cd25dd2b8 100644 --- a/aggregator/prover/prover_test.go +++ b/aggregator/prover/prover_test.go @@ -11,7 +11,7 @@ import ( "github.com/0xPolygon/cdk/aggregator/prover" "github.com/0xPolygon/cdk/aggregator/prover/mocks" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" diff --git a/aggsender/aggsender.go b/aggsender/aggsender.go deleted file mode 100644 index 
133fb4fa3..000000000 --- a/aggsender/aggsender.go +++ /dev/null @@ -1,882 +0,0 @@ -package aggsender - -import ( - "context" - "crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "os" - "time" - - zkevm "github.com/0xPolygon/cdk" - jRPC "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db" - aggsenderrpc "github.com/0xPolygon/cdk/aggsender/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -const signatureSize = 65 - -var ( - errNoBridgesAndClaims = errors.New("no bridges and claims to build certificate") - errInvalidSignatureSize = errors.New("invalid signature size") - - zeroLER = common.HexToHash("0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757") -) - -// AggSender is a component that will send certificates to the aggLayer -type AggSender struct { - log types.Logger - - l2Syncer types.L2BridgeSyncer - l1infoTreeSyncer types.L1InfoTreeSyncer - epochNotifier types.EpochNotifier - - storage db.AggSenderStorage - aggLayerClient agglayer.AgglayerClientInterface - - cfg Config - - sequencerKey *ecdsa.PrivateKey - - status types.AggsenderStatus -} - -// New returns a new AggSender -func New( - ctx context.Context, - logger *log.Logger, - cfg Config, - aggLayerClient agglayer.AgglayerClientInterface, - l1InfoTreeSyncer *l1infotreesync.L1InfoTreeSync, - l2Syncer types.L2BridgeSyncer, - epochNotifier types.EpochNotifier) (*AggSender, error) { - storageConfig := db.AggSenderSQLStorageConfig{ - DBPath: cfg.StoragePath, - KeepCertificatesHistory: cfg.KeepCertificatesHistory, - } - storage, err := db.NewAggSenderSQLStorage(logger, storageConfig) - if err != nil { - return nil, err - } - - sequencerPrivateKey, err 
:= cdkcommon.NewKeyFromKeystore(cfg.AggsenderPrivateKey) - if err != nil { - return nil, err - } - - logger.Infof("Aggsender Config: %s.", cfg.String()) - - return &AggSender{ - cfg: cfg, - log: logger, - storage: storage, - l2Syncer: l2Syncer, - aggLayerClient: aggLayerClient, - l1infoTreeSyncer: l1InfoTreeSyncer, - sequencerKey: sequencerPrivateKey, - epochNotifier: epochNotifier, - status: types.AggsenderStatus{Status: types.StatusNone}, - }, nil -} - -func (a *AggSender) Info() types.AggsenderInfo { - res := types.AggsenderInfo{ - AggsenderStatus: a.status, - Version: zkevm.GetVersion(), - EpochNotifierDescription: a.epochNotifier.String(), - NetworkID: a.l2Syncer.OriginNetwork(), - } - return res -} - -// GetRPCServices returns the list of services that the RPC provider exposes -func (a *AggSender) GetRPCServices() []jRPC.Service { - if !a.cfg.EnableRPC { - return []jRPC.Service{} - } - - logger := log.WithFields("aggsender-rpc", cdkcommon.BRIDGE) - return []jRPC.Service{ - { - Name: "aggsender", - Service: aggsenderrpc.NewAggsenderRPC(logger, a.storage, a), - }, - } -} - -// Start starts the AggSender -func (a *AggSender) Start(ctx context.Context) { - a.log.Info("AggSender started") - a.status.Start(time.Now().UTC()) - a.checkInitialStatus(ctx) - a.sendCertificates(ctx) -} - -// checkInitialStatus check local status vs agglayer status -func (a *AggSender) checkInitialStatus(ctx context.Context) { - ticker := time.NewTicker(a.cfg.DelayBeetweenRetries.Duration) - defer ticker.Stop() - a.status.Status = types.StatusCheckingInitialStage - for { - err := a.checkLastCertificateFromAgglayer(ctx) - a.status.SetLastError(err) - if err != nil { - a.log.Errorf("error checking initial status: %w, retrying in %s", err, a.cfg.DelayBeetweenRetries.String()) - } else { - a.log.Info("Initial status checked successfully") - return - } - select { - case <-ctx.Done(): - return - case <-ticker.C: - } - } -} - -// sendCertificates sends certificates to the aggLayer -func (a 
*AggSender) sendCertificates(ctx context.Context) { - chEpoch := a.epochNotifier.Subscribe("aggsender") - a.status.Status = types.StatusCertificateStage - for { - select { - case epoch := <-chEpoch: - a.log.Infof("Epoch received: %s", epoch.String()) - thereArePendingCerts := a.checkPendingCertificatesStatus(ctx) - if !thereArePendingCerts { - _, err := a.sendCertificate(ctx) - a.status.SetLastError(err) - if err != nil { - a.log.Error(err) - } - } else { - log.Infof("Skipping epoch %s because there are pending certificates", - epoch.String()) - } - case <-ctx.Done(): - a.log.Info("AggSender stopped") - return - } - } -} - -// sendCertificate sends certificate for a network -func (a *AggSender) sendCertificate(ctx context.Context) (*agglayer.SignedCertificate, error) { - a.log.Infof("trying to send a new certificate...") - - shouldSend, err := a.shouldSendCertificate() - if err != nil { - return nil, err - } - - if !shouldSend { - a.log.Infof("waiting for pending certificates to be settled") - return nil, nil - } - - lastL2BlockSynced, err := a.l2Syncer.GetLastProcessedBlock(ctx) - if err != nil { - return nil, fmt.Errorf("error getting last processed block from l2: %w", err) - } - - lastSentCertificateInfo, err := a.storage.GetLastSentCertificate() - if err != nil { - return nil, err - } - - previousToBlock, retryCount := getLastSentBlockAndRetryCount(lastSentCertificateInfo) - - if previousToBlock >= lastL2BlockSynced { - a.log.Infof("no new blocks to send a certificate, last certificate block: %d, last L2 block: %d", - previousToBlock, lastL2BlockSynced) - return nil, nil - } - - fromBlock := previousToBlock + 1 - toBlock := lastL2BlockSynced - - bridges, err := a.l2Syncer.GetBridgesPublished(ctx, fromBlock, toBlock) - if err != nil { - return nil, fmt.Errorf("error getting bridges: %w", err) - } - - if len(bridges) == 0 { - a.log.Infof("no bridges consumed, no need to send a certificate from block: %d to block: %d", fromBlock, toBlock) - return nil, nil - } - - 
claims, err := a.l2Syncer.GetClaims(ctx, fromBlock, toBlock) - if err != nil { - return nil, fmt.Errorf("error getting claims: %w", err) - } - certificateParams := &types.CertificateBuildParams{ - FromBlock: fromBlock, - ToBlock: toBlock, - Bridges: bridges, - Claims: claims, - CreatedAt: uint32(time.Now().UTC().Unix()), - } - - certificateParams, err = a.limitCertSize(certificateParams) - if err != nil { - return nil, fmt.Errorf("error limitCertSize: %w", err) - } - a.log.Infof("building certificate for %s estimatedSize=%d", - certificateParams.String(), certificateParams.EstimatedSize()) - - certificate, err := a.buildCertificate(ctx, certificateParams, lastSentCertificateInfo) - if err != nil { - return nil, fmt.Errorf("error building certificate: %w", err) - } - - signedCertificate, err := a.signCertificate(certificate) - if err != nil { - return nil, fmt.Errorf("error signing certificate: %w", err) - } - - a.saveCertificateToFile(signedCertificate) - a.log.Infof("certificate ready to be send to AggLayer: %s", signedCertificate.Brief()) - if a.cfg.DryRun { - a.log.Warn("dry run mode enabled, skipping sending certificate") - return signedCertificate, nil - } - certificateHash, err := a.aggLayerClient.SendCertificate(signedCertificate) - if err != nil { - return nil, fmt.Errorf("error sending certificate: %w", err) - } - - a.log.Debugf("certificate send: Height: %d cert: %s", signedCertificate.Height, signedCertificate.Brief()) - - raw, err := json.Marshal(signedCertificate) - if err != nil { - return nil, fmt.Errorf("error marshalling signed certificate. Cert:%s. 
Err: %w", signedCertificate.Brief(), err) - } - - prevLER := common.BytesToHash(certificate.PrevLocalExitRoot[:]) - certInfo := types.CertificateInfo{ - Height: certificate.Height, - RetryCount: retryCount, - CertificateID: certificateHash, - NewLocalExitRoot: certificate.NewLocalExitRoot, - PreviousLocalExitRoot: &prevLER, - FromBlock: fromBlock, - ToBlock: toBlock, - CreatedAt: certificateParams.CreatedAt, - UpdatedAt: certificateParams.CreatedAt, - SignedCertificate: string(raw), - } - // TODO: Improve this case, if a cert is not save in the storage, we are going to settle a unknown certificate - err = a.saveCertificateToStorage(ctx, certInfo, a.cfg.MaxRetriesStoreCertificate) - if err != nil { - a.log.Errorf("error saving certificate to storage. Cert:%s Err: %w", certInfo.String(), err) - return nil, fmt.Errorf("error saving last sent certificate %s in db: %w", certInfo.String(), err) - } - - a.log.Infof("certificate: %s sent successfully for range of l2 blocks (from block: %d, to block: %d) cert:%s", - certInfo.ID(), fromBlock, toBlock, signedCertificate.Brief()) - - return signedCertificate, nil -} - -// saveCertificateToStorage saves the certificate to the storage -// it retries if it fails. 
if param retries == 0 it retries indefinitely -func (a *AggSender) saveCertificateToStorage(ctx context.Context, cert types.CertificateInfo, maxRetries int) error { - retries := 1 - err := fmt.Errorf("initial_error") - for err != nil { - if err = a.storage.SaveLastSentCertificate(ctx, cert); err != nil { - // If this happens we can't work as normal, because local DB is outdated, we have to retry - a.log.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) - if retries == maxRetries { - return fmt.Errorf("error saving last sent certificate %s in db: %w", cert.String(), err) - } else { - retries++ - time.Sleep(a.cfg.DelayBeetweenRetries.Duration) - } - } - } - return nil -} - -func (a *AggSender) limitCertSize(fullCert *types.CertificateBuildParams) (*types.CertificateBuildParams, error) { - currentCert := fullCert - var previousCert *types.CertificateBuildParams - var err error - for { - if currentCert.NumberOfBridges() == 0 { - // We can't reduce more the certificate, so this is the minium size - a.log.Warnf("We reach the minium size of bridge.Certificate size: %d >max size: %d", - previousCert.EstimatedSize(), a.cfg.MaxCertSize) - return previousCert, nil - } - - if a.cfg.MaxCertSize == 0 || currentCert.EstimatedSize() < a.cfg.MaxCertSize { - return currentCert, nil - } - - // Minimum size of the certificate - if currentCert.NumberOfBlocks() <= 1 { - a.log.Warnf("reach the minium num blocks [%d to %d].Certificate size: %d >max size: %d", - currentCert.FromBlock, currentCert.ToBlock, currentCert.EstimatedSize(), a.cfg.MaxCertSize) - return currentCert, nil - } - previousCert = currentCert - currentCert, err = currentCert.Range(currentCert.FromBlock, currentCert.ToBlock-1) - if err != nil { - return nil, fmt.Errorf("error reducing certificate: %w", err) - } - } -} - -// saveCertificate saves the certificate to a tmp file -func (a *AggSender) saveCertificateToFile(signedCertificate *agglayer.SignedCertificate) { - if signedCertificate == nil 
|| a.cfg.SaveCertificatesToFilesPath == "" { - return - } - fn := fmt.Sprintf("%s/certificate_%04d-%07d.json", - a.cfg.SaveCertificatesToFilesPath, signedCertificate.Height, time.Now().Unix()) - a.log.Infof("saving certificate to file: %s", fn) - jsonData, err := json.MarshalIndent(signedCertificate, "", " ") - if err != nil { - a.log.Errorf("error marshalling certificate: %w", err) - } - - if err = os.WriteFile(fn, jsonData, 0644); err != nil { //nolint:gosec,mnd // we are writing to a tmp file - a.log.Errorf("error writing certificate to file: %w", err) - } -} - -// getNextHeightAndPreviousLER returns the height and previous LER for the new certificate -func (a *AggSender) getNextHeightAndPreviousLER( - lastSentCertificateInfo *types.CertificateInfo) (uint64, common.Hash, error) { - if lastSentCertificateInfo == nil { - return 0, zeroLER, nil - } - if !lastSentCertificateInfo.Status.IsClosed() { - return 0, zeroLER, fmt.Errorf("last certificate %s is not closed (status: %s)", - lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) - } - if lastSentCertificateInfo.Status.IsSettled() { - return lastSentCertificateInfo.Height + 1, lastSentCertificateInfo.NewLocalExitRoot, nil - } - - if lastSentCertificateInfo.Status.IsInError() { - // We can reuse last one of lastCert? 
- if lastSentCertificateInfo.PreviousLocalExitRoot != nil { - return lastSentCertificateInfo.Height, *lastSentCertificateInfo.PreviousLocalExitRoot, nil - } - // Is the first one, so we can set the zeroLER - if lastSentCertificateInfo.Height == 0 { - return 0, zeroLER, nil - } - // We get previous certificate that must be settled - a.log.Debugf("last certificate %s is in error, getting previous settled certificate height:%d", - lastSentCertificateInfo.Height-1) - lastSettleCert, err := a.storage.GetCertificateByHeight(lastSentCertificateInfo.Height - 1) - if err != nil { - return 0, common.Hash{}, fmt.Errorf("error getting last settled certificate: %w", err) - } - if lastSettleCert == nil { - return 0, common.Hash{}, fmt.Errorf("none settled certificate: %w", err) - } - if !lastSettleCert.Status.IsSettled() { - return 0, common.Hash{}, fmt.Errorf("last settled certificate %s is not settled (status: %s)", - lastSettleCert.ID(), lastSettleCert.Status.String()) - } - - return lastSentCertificateInfo.Height, lastSettleCert.NewLocalExitRoot, nil - } - return 0, zeroLER, fmt.Errorf("last certificate %s has an unknown status: %s", - lastSentCertificateInfo.ID(), lastSentCertificateInfo.Status.String()) -} - -// buildCertificate builds a certificate from the bridge events -func (a *AggSender) buildCertificate(ctx context.Context, - certParams *types.CertificateBuildParams, - lastSentCertificateInfo *types.CertificateInfo) (*agglayer.Certificate, error) { - if certParams.IsEmpty() { - return nil, errNoBridgesAndClaims - } - - bridgeExits := a.getBridgeExits(certParams.Bridges) - importedBridgeExits, err := a.getImportedBridgeExits(ctx, certParams.Claims) - if err != nil { - return nil, fmt.Errorf("error getting imported bridge exits: %w", err) - } - - depositCount := certParams.MaxDepositCount() - - exitRoot, err := a.l2Syncer.GetExitRootByIndex(ctx, depositCount) - if err != nil { - return nil, fmt.Errorf("error getting exit root by index: %d. 
Error: %w", depositCount, err) - } - - height, previousLER, err := a.getNextHeightAndPreviousLER(lastSentCertificateInfo) - if err != nil { - return nil, fmt.Errorf("error getting next height and previous LER: %w", err) - } - - meta := types.NewCertificateMetadata( - certParams.FromBlock, - uint32(certParams.ToBlock-certParams.FromBlock), - certParams.CreatedAt, - ) - - return &agglayer.Certificate{ - NetworkID: a.l2Syncer.OriginNetwork(), - PrevLocalExitRoot: previousLER, - NewLocalExitRoot: exitRoot.Hash, - BridgeExits: bridgeExits, - ImportedBridgeExits: importedBridgeExits, - Height: height, - Metadata: meta.ToHash(), - }, nil -} - -// createCertificateMetadata creates the metadata for the certificate -// it returns: newMetadata + bool if the metadata is hashed or not -func convertBridgeMetadata(metadata []byte, importedBridgeMetadataAsHash bool) ([]byte, bool) { - var metaData []byte - var isMetadataHashed bool - if importedBridgeMetadataAsHash && len(metadata) > 0 { - metaData = crypto.Keccak256(metadata) - isMetadataHashed = true - } else { - metaData = metadata - isMetadataHashed = false - } - return metaData, isMetadataHashed -} - -// convertClaimToImportedBridgeExit converts a claim to an ImportedBridgeExit object -func (a *AggSender) convertClaimToImportedBridgeExit(claim bridgesync.Claim) (*agglayer.ImportedBridgeExit, error) { - leafType := agglayer.LeafTypeAsset - if claim.IsMessage { - leafType = agglayer.LeafTypeMessage - } - metaData, isMetadataIsHashed := convertBridgeMetadata(claim.Metadata, a.cfg.BridgeMetadataAsHash) - - bridgeExit := &agglayer.BridgeExit{ - LeafType: leafType, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: claim.OriginNetwork, - OriginTokenAddress: claim.OriginAddress, - }, - DestinationNetwork: claim.DestinationNetwork, - DestinationAddress: claim.DestinationAddress, - Amount: claim.Amount, - IsMetadataHashed: isMetadataIsHashed, - Metadata: metaData, - } - - mainnetFlag, rollupIndex, leafIndex, err := 
bridgesync.DecodeGlobalIndex(claim.GlobalIndex) - if err != nil { - return nil, fmt.Errorf("error decoding global index: %w", err) - } - - return &agglayer.ImportedBridgeExit{ - BridgeExit: bridgeExit, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: mainnetFlag, - RollupIndex: rollupIndex, - LeafIndex: leafIndex, - }, - }, nil -} - -// getBridgeExits converts bridges to agglayer.BridgeExit objects -func (a *AggSender) getBridgeExits(bridges []bridgesync.Bridge) []*agglayer.BridgeExit { - bridgeExits := make([]*agglayer.BridgeExit, 0, len(bridges)) - - for _, bridge := range bridges { - metaData, isMetadataHashed := convertBridgeMetadata(bridge.Metadata, a.cfg.BridgeMetadataAsHash) - bridgeExits = append(bridgeExits, &agglayer.BridgeExit{ - LeafType: agglayer.LeafType(bridge.LeafType), - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: bridge.OriginNetwork, - OriginTokenAddress: bridge.OriginAddress, - }, - DestinationNetwork: bridge.DestinationNetwork, - DestinationAddress: bridge.DestinationAddress, - Amount: bridge.Amount, - IsMetadataHashed: isMetadataHashed, - Metadata: metaData, - }) - } - - return bridgeExits -} - -// getImportedBridgeExits converts claims to agglayer.ImportedBridgeExit objects and calculates necessary proofs -func (a *AggSender) getImportedBridgeExits( - ctx context.Context, claims []bridgesync.Claim, -) ([]*agglayer.ImportedBridgeExit, error) { - if len(claims) == 0 { - // no claims to convert - return []*agglayer.ImportedBridgeExit{}, nil - } - - var ( - greatestL1InfoTreeIndexUsed uint32 - importedBridgeExits = make([]*agglayer.ImportedBridgeExit, 0, len(claims)) - claimL1Info = make([]*l1infotreesync.L1InfoTreeLeaf, 0, len(claims)) - ) - - for _, claim := range claims { - info, err := a.l1infoTreeSyncer.GetInfoByGlobalExitRoot(claim.GlobalExitRoot) - if err != nil { - return nil, fmt.Errorf("error getting info by global exit root: %w", err) - } - - claimL1Info = append(claimL1Info, info) - - if info.L1InfoTreeIndex > 
greatestL1InfoTreeIndexUsed { - greatestL1InfoTreeIndexUsed = info.L1InfoTreeIndex - } - } - - rootToProve, err := a.l1infoTreeSyncer.GetL1InfoTreeRootByIndex(ctx, greatestL1InfoTreeIndexUsed) - if err != nil { - return nil, fmt.Errorf("error getting L1 Info tree root by index: %d. Error: %w", greatestL1InfoTreeIndexUsed, err) - } - - for i, claim := range claims { - l1Info := claimL1Info[i] - - a.log.Debugf("claim[%d]: destAddr: %s GER: %s Block: %d Pos: %d GlobalIndex: 0x%x", - i, claim.DestinationAddress.String(), claim.GlobalExitRoot.String(), - claim.BlockNum, claim.BlockPos, claim.GlobalIndex) - ibe, err := a.convertClaimToImportedBridgeExit(claim) - if err != nil { - return nil, fmt.Errorf("error converting claim to imported bridge exit: %w", err) - } - - importedBridgeExits = append(importedBridgeExits, ibe) - - gerToL1Proof, err := a.l1infoTreeSyncer.GetL1InfoTreeMerkleProofFromIndexToRoot( - ctx, l1Info.L1InfoTreeIndex, rootToProve.Hash, - ) - if err != nil { - return nil, fmt.Errorf( - "error getting L1 Info tree merkle proof for leaf index: %d and root: %s. 
Error: %w", - l1Info.L1InfoTreeIndex, rootToProve.Hash, err, - ) - } - - claim := claims[i] - if ibe.GlobalIndex.MainnetFlag { - ibe.ClaimData = &agglayer.ClaimFromMainnnet{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: l1Info.L1InfoTreeIndex, - RollupExitRoot: claim.RollupExitRoot, - MainnetExitRoot: claim.MainnetExitRoot, - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: l1Info.GlobalExitRoot, - Timestamp: l1Info.Timestamp, - BlockHash: l1Info.PreviousBlockHash, - }, - }, - ProofLeafMER: &agglayer.MerkleProof{ - Root: claim.MainnetExitRoot, - Proof: claim.ProofLocalExitRoot, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: rootToProve.Hash, - Proof: gerToL1Proof, - }, - } - } else { - ibe.ClaimData = &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: l1Info.L1InfoTreeIndex, - RollupExitRoot: claim.RollupExitRoot, - MainnetExitRoot: claim.MainnetExitRoot, - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: l1Info.GlobalExitRoot, - Timestamp: l1Info.Timestamp, - BlockHash: l1Info.PreviousBlockHash, - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: tree.CalculateRoot(ibe.BridgeExit.Hash(), - claim.ProofLocalExitRoot, ibe.GlobalIndex.LeafIndex), - Proof: claim.ProofLocalExitRoot, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: claim.RollupExitRoot, - Proof: claim.ProofRollupExitRoot, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: rootToProve.Hash, - Proof: gerToL1Proof, - }, - } - } - } - - return importedBridgeExits, nil -} - -// signCertificate signs a certificate with the sequencer key -func (a *AggSender) signCertificate(certificate *agglayer.Certificate) (*agglayer.SignedCertificate, error) { - hashToSign := certificate.HashToSign() - - sig, err := crypto.Sign(hashToSign.Bytes(), a.sequencerKey) - if err != nil { - return nil, err - } - - a.log.Infof("Signed certificate. sequencer address: %s. 
New local exit root: %s Hash signed: %s", - crypto.PubkeyToAddress(a.sequencerKey.PublicKey).String(), - common.BytesToHash(certificate.NewLocalExitRoot[:]).String(), - hashToSign.String(), - ) - - r, s, isOddParity, err := extractSignatureData(sig) - if err != nil { - return nil, err - } - - return &agglayer.SignedCertificate{ - Certificate: certificate, - Signature: &agglayer.Signature{ - R: r, - S: s, - OddParity: isOddParity, - }, - }, nil -} - -// checkPendingCertificatesStatus checks the status of pending certificates -// and updates in the storage if it changed on agglayer -// It returns: -// bool -> if there are pending certificates -func (a *AggSender) checkPendingCertificatesStatus(ctx context.Context) bool { - pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses) - if err != nil { - a.log.Errorf("error getting pending certificates: %w", err) - return true - } - - a.log.Debugf("checkPendingCertificatesStatus num of pendingCertificates: %d", len(pendingCertificates)) - thereArePendingCerts := false - - for _, certificate := range pendingCertificates { - certificateHeader, err := a.aggLayerClient.GetCertificateHeader(certificate.CertificateID) - if err != nil { - a.log.Errorf("error getting certificate header of %s from agglayer: %w", - certificate.ID(), err) - return true - } - - a.log.Debugf("aggLayerClient.GetCertificateHeader status [%s] of certificate %s elapsed time:%s", - certificateHeader.Status, - certificateHeader.ID(), - certificate.ElapsedTimeSinceCreation()) - - if err := a.updateCertificateStatus(ctx, certificate, certificateHeader); err != nil { - a.log.Errorf("error updating certificate %s status in storage: %w", certificateHeader.String(), err) - return true - } - - if !certificate.IsClosed() { - a.log.Infof("certificate %s is still pending, elapsed time:%s ", - certificateHeader.ID(), certificate.ElapsedTimeSinceCreation()) - thereArePendingCerts = true - } - } - return thereArePendingCerts -} - -// 
updateCertificate updates the certificate status in the storage -func (a *AggSender) updateCertificateStatus(ctx context.Context, - localCert *types.CertificateInfo, - agglayerCert *agglayer.CertificateHeader) error { - if localCert.Status == agglayerCert.Status { - return nil - } - a.log.Infof("certificate %s changed status from [%s] to [%s] elapsed time: %s full_cert (agglayer): %s", - localCert.ID(), localCert.Status, agglayerCert.Status, localCert.ElapsedTimeSinceCreation(), - agglayerCert.String()) - - // That is a strange situation - if agglayerCert.Status.IsOpen() && localCert.Status.IsClosed() { - a.log.Warnf("certificate %s is reopened! from [%s] to [%s]", - localCert.ID(), localCert.Status, agglayerCert.Status) - } - - localCert.Status = agglayerCert.Status - localCert.UpdatedAt = uint32(time.Now().UTC().Unix()) - if err := a.storage.UpdateCertificate(ctx, *localCert); err != nil { - a.log.Errorf("error updating certificate %s status in storage: %w", agglayerCert.ID(), err) - return fmt.Errorf("error updating certificate. 
Err: %w", err) - } - return nil -} - -// shouldSendCertificate checks if a certificate should be sent at given time -// if we have pending certificates, then we wait until they are settled -func (a *AggSender) shouldSendCertificate() (bool, error) { - pendingCertificates, err := a.storage.GetCertificatesByStatus(agglayer.NonSettledStatuses) - if err != nil { - return false, fmt.Errorf("error getting pending certificates: %w", err) - } - - return len(pendingCertificates) == 0, nil -} - -// checkLastCertificateFromAgglayer checks the last certificate from agglayer -func (a *AggSender) checkLastCertificateFromAgglayer(ctx context.Context) error { - networkID := a.l2Syncer.OriginNetwork() - a.log.Infof("recovery: checking last certificate from AggLayer for network %d", networkID) - aggLayerLastCert, err := a.aggLayerClient.GetLatestKnownCertificateHeader(networkID) - if err != nil { - return fmt.Errorf("recovery: error getting latest known certificate header from agglayer: %w", err) - } - a.log.Infof("recovery: last certificate from AggLayer: %s", aggLayerLastCert.String()) - localLastCert, err := a.storage.GetLastSentCertificate() - if err != nil { - return fmt.Errorf("recovery: error getting last sent certificate from local storage: %w", err) - } - a.log.Infof("recovery: last certificate in storage: %s", localLastCert.String()) - - // CASE 1: No certificates in local storage and agglayer - if localLastCert == nil && aggLayerLastCert == nil { - a.log.Info("recovery: No certificates in local storage and agglayer: initial state") - return nil - } - // CASE 2: No certificates in local storage but agglayer has one - if localLastCert == nil && aggLayerLastCert != nil { - a.log.Info("recovery: No certificates in local storage but agglayer have one: recovery aggSender cert: %s", - aggLayerLastCert.String()) - if _, err := a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert); err != nil { - return fmt.Errorf("recovery: error updating local storage with agglayer 
certificate: %w", err) - } - return nil - } - // CASE 2.1: certificate in storage but not in agglayer - // this is a non-sense, so throw an error - if localLastCert != nil && aggLayerLastCert == nil { - return fmt.Errorf("recovery: certificate exists in storage but not in agglayer. Inconsistency") - } - // CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage - if aggLayerLastCert.Height < localLastCert.Height { - return fmt.Errorf("recovery: the last certificate in the agglayer has less height (%d) "+ - "than the one in the local storage (%d)", aggLayerLastCert.Height, localLastCert.Height) - } - // CASE 3.2: aggsender stopped between sending to agglayer and storing to the local storage - if aggLayerLastCert.Height == localLastCert.Height+1 { - a.log.Infof("recovery: AggLayer has the next cert (height: %d), so is a recovery case: storing cert: %s", - aggLayerLastCert.Height, aggLayerLastCert.String()) - // we need to store the certificate in the local storage. 
- localLastCert, err = a.updateLocalStorageWithAggLayerCert(ctx, aggLayerLastCert) - if err != nil { - log.Errorf("recovery: error updating certificate: %s, reason: %w", aggLayerLastCert.String(), err) - return fmt.Errorf("recovery: error updating certificate: %w", err) - } - } - // CASE 4: AggSender and AggLayer are not on the same page - // note: we don't need to check individual fields of the certificate - // because CertificateID is a hash of all the fields - if localLastCert.CertificateID != aggLayerLastCert.CertificateID { - a.log.Errorf("recovery: Local certificate:\n %s \n is different from agglayer certificate:\n %s", - localLastCert.String(), aggLayerLastCert.String()) - return fmt.Errorf("recovery: mismatch between local and agglayer certificates") - } - // CASE 5: AggSender and AggLayer are at same page - // just update status - err = a.updateCertificateStatus(ctx, localLastCert, aggLayerLastCert) - if err != nil { - a.log.Errorf("recovery: error updating status certificate: %s status: %w", aggLayerLastCert.String(), err) - return fmt.Errorf("recovery: error updating certificate status: %w", err) - } - - a.log.Infof("recovery: successfully checked last certificate from AggLayer for network %d", networkID) - return nil -} - -// updateLocalStorageWithAggLayerCert updates the local storage with the certificate from the AggLayer -func (a *AggSender) updateLocalStorageWithAggLayerCert(ctx context.Context, - aggLayerCert *agglayer.CertificateHeader) (*types.CertificateInfo, error) { - certInfo := NewCertificateInfoFromAgglayerCertHeader(aggLayerCert) - a.log.Infof("setting initial certificate from AggLayer: %s", certInfo.String()) - return certInfo, a.storage.SaveLastSentCertificate(ctx, *certInfo) -} - -// extractSignatureData extracts the R, S, and V from a 65-byte signature -func extractSignatureData(signature []byte) (r, s common.Hash, isOddParity bool, err error) { - if len(signature) != signatureSize { - err = errInvalidSignatureSize - return - } - - r 
= common.BytesToHash(signature[:32]) // First 32 bytes are R - s = common.BytesToHash(signature[32:64]) // Next 32 bytes are S - isOddParity = signature[64]%2 == 1 //nolint:mnd // Last byte is V - - return -} - -func NewCertificateInfoFromAgglayerCertHeader(c *agglayer.CertificateHeader) *types.CertificateInfo { - if c == nil { - return nil - } - now := uint32(time.Now().UTC().Unix()) - meta := types.NewCertificateMetadataFromHash(c.Metadata) - toBlock := meta.FromBlock + uint64(meta.Offset) - createdAt := meta.CreatedAt - - if meta.Version < 1 { - toBlock = meta.ToBlock - createdAt = now - } - - res := &types.CertificateInfo{ - Height: c.Height, - CertificateID: c.CertificateID, - NewLocalExitRoot: c.NewLocalExitRoot, - FromBlock: meta.FromBlock, - ToBlock: toBlock, - Status: c.Status, - CreatedAt: createdAt, - UpdatedAt: now, - SignedCertificate: "na/agglayer header", - } - if c.PreviousLocalExitRoot != nil { - res.PreviousLocalExitRoot = c.PreviousLocalExitRoot - } - return res -} - -// getLastSentBlockAndRetryCount returns the last sent block of the last sent certificate -// if there is no previosly sent certificate, it returns 0 and 0 -func getLastSentBlockAndRetryCount(lastSentCertificateInfo *types.CertificateInfo) (uint64, int) { - if lastSentCertificateInfo == nil { - return 0, 0 - } - - retryCount := 0 - lastSentBlock := lastSentCertificateInfo.ToBlock - - if lastSentCertificateInfo.Status == agglayer.InError { - // if the last certificate was in error, we need to resend it - // from the block before the error - if lastSentCertificateInfo.FromBlock > 0 { - lastSentBlock = lastSentCertificateInfo.FromBlock - 1 - } - - retryCount = lastSentCertificateInfo.RetryCount + 1 - } - - return lastSentBlock, retryCount -} diff --git a/aggsender/aggsender_test.go b/aggsender/aggsender_test.go deleted file mode 100644 index 02efccf17..000000000 --- a/aggsender/aggsender_test.go +++ /dev/null @@ -1,2151 +0,0 @@ -package aggsender - -import ( - "context" - 
"crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "math/big" - "os" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db" - "github.com/0xPolygon/cdk/aggsender/mocks" - aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -const ( - networkIDTest = uint32(1234) -) - -var ( - errTest = errors.New("unitest error") - ler1 = common.HexToHash("0x123") -) - -func TestConfigString(t *testing.T) { - config := Config{ - StoragePath: "/path/to/storage", - AggLayerURL: "http://agglayer.url", - AggsenderPrivateKey: types.KeystoreFileConfig{Path: "/path/to/key", Password: "password"}, - URLRPCL2: "http://l2.rpc.url", - BlockFinality: "latestBlock", - EpochNotificationPercentage: 50, - SaveCertificatesToFilesPath: "/path/to/certificates", - } - - expected := "StoragePath: /path/to/storage\n" + - "AggLayerURL: http://agglayer.url\n" + - "AggsenderPrivateKeyPath: /path/to/key\n" + - "URLRPCL2: http://l2.rpc.url\n" + - "BlockFinality: latestBlock\n" + - "EpochNotificationPercentage: 50\n" + - "SaveCertificatesToFilesPath: /path/to/certificates\n" - - require.Equal(t, expected, config.String()) -} - -func TestConvertClaimToImportedBridgeExit(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - claim bridgesync.Claim - expectedError bool - expectedExit *agglayer.ImportedBridgeExit - }{ - { - name: "Asset claim", - claim: bridgesync.Claim{ - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - 
Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(1), - }, - expectedError: false, - expectedExit: &agglayer.ImportedBridgeExit{ - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - }, - }, - { - name: "Message claim", - claim: bridgesync.Claim{ - IsMessage: true, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(2), - }, - expectedError: false, - expectedExit: &agglayer.ImportedBridgeExit{ - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeMessage, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 2, - }, - }, - }, - { - name: "Invalid global index", - claim: bridgesync.Claim{ - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: new(big.Int).SetBytes([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}), - }, - expectedError: true, - expectedExit: nil, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{} - exit, err := 
aggSender.convertClaimToImportedBridgeExit(tt.claim) - - if tt.expectedError { - require.Error(t, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedExit, exit) - } - }) - } -} - -func TestGetBridgeExits(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - bridges []bridgesync.Bridge - expectedExits []*agglayer.BridgeExit - }{ - { - name: "Single bridge", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - expectedExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - }, - { - name: "Multiple bridges", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - { - LeafType: agglayer.LeafTypeMessage.Uint8(), - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x789"), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - }, - }, - expectedExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - { - LeafType: agglayer.LeafTypeMessage, - TokenInfo: 
&agglayer.TokenInfo{ - OriginNetwork: 3, - OriginTokenAddress: common.HexToAddress("0x789"), - }, - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - }, - }, - }, - { - name: "No bridges", - bridges: []bridgesync.Bridge{}, - expectedExits: []*agglayer.BridgeExit{}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{} - exits := aggSender.getBridgeExits(tt.bridges) - - require.Equal(t, tt.expectedExits, exits) - }) - } -} - -func TestAggSenderStart(t *testing.T) { - aggLayerMock := agglayer.NewAgglayerClientMock(t) - epochNotifierMock := mocks.NewEpochNotifier(t) - bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - aggSender, err := New( - ctx, - log.WithFields("test", "unittest"), - Config{ - StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderStart.sqlite"), - DelayBeetweenRetries: types.Duration{Duration: 1 * time.Microsecond}, - }, - aggLayerMock, - nil, - bridgeL2SyncerMock, - epochNotifierMock) - require.NoError(t, err) - require.NotNil(t, aggSender) - ch := make(chan aggsendertypes.EpochEvent) - epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) - bridgeL2SyncerMock.EXPECT().OriginNetwork().Return(uint32(1)) - bridgeL2SyncerMock.EXPECT().GetLastProcessedBlock(mock.Anything).Return(uint64(0), nil) - aggLayerMock.EXPECT().GetLatestKnownCertificateHeader(mock.Anything).Return(nil, nil) - - go aggSender.Start(ctx) - ch <- aggsendertypes.EpochEvent{ - Epoch: 1, - } - time.Sleep(200 * time.Millisecond) -} - -func TestAggSenderSendCertificates(t *testing.T) { - AggLayerMock := agglayer.NewAgglayerClientMock(t) - epochNotifierMock := mocks.NewEpochNotifier(t) - bridgeL2SyncerMock := mocks.NewL2BridgeSyncer(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - aggSender, err := New( - ctx, 
- log.WithFields("test", "unittest"), - Config{ - StoragePath: path.Join(t.TempDir(), "aggsenderTestAggSenderSendCertificates.sqlite"), - }, - AggLayerMock, - nil, - bridgeL2SyncerMock, - epochNotifierMock) - require.NoError(t, err) - require.NotNil(t, aggSender) - ch := make(chan aggsendertypes.EpochEvent, 2) - epochNotifierMock.EXPECT().Subscribe("aggsender").Return(ch) - err = aggSender.storage.SaveLastSentCertificate(ctx, aggsendertypes.CertificateInfo{ - Height: 1, - Status: agglayer.Pending, - }) - AggLayerMock.EXPECT().GetCertificateHeader(mock.Anything).Return(&agglayer.CertificateHeader{ - Status: agglayer.Pending, - }, nil) - require.NoError(t, err) - ch <- aggsendertypes.EpochEvent{ - Epoch: 1, - } - go aggSender.sendCertificates(ctx) - time.Sleep(200 * time.Millisecond) -} - -//nolint:dupl -func TestGetImportedBridgeExits(t *testing.T) { - t.Parallel() - - mockProof := generateTestProof(t) - - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( - treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, - mock.Anything, mock.Anything).Return(mockProof, nil) - - tests := []struct { - name string - claims []bridgesync.Claim - expectedError bool - expectedExits []*agglayer.ImportedBridgeExit - }{ - { - name: "Single claim", - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: bridgesync.GenerateGlobalIndex(false, 1, 
1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - expectedError: false, - expectedExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xc52019815b51acf67a715cae6794a20083d63fd9af45783b7adf69123dae92c8"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaab"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - }, - { - name: "Multiple claims", - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - GlobalIndex: big.NewInt(1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaa"), - MainnetExitRoot: common.HexToHash("0xbbb"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - { - 
IsMessage: true, - OriginNetwork: 3, - OriginAddress: common.HexToAddress("0x789"), - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: big.NewInt(200), - Metadata: []byte("data"), - GlobalIndex: bridgesync.GenerateGlobalIndex(true, 0, 2), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xbbb"), - MainnetExitRoot: common.HexToHash("0xccc"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - expectedError: false, - expectedExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xaaa"), - MainnetExitRoot: common.HexToHash("0xbbb"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0x105e0f1144e57f6fb63f1dfc5083b1f59be3512be7cf5e63523779ad14a4d987"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaa"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeMessage, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 3, - OriginTokenAddress: common.HexToAddress("0x789"), - }, - DestinationNetwork: 4, - DestinationAddress: common.HexToAddress("0xabc"), - Amount: 
big.NewInt(200), - Metadata: []byte("data"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: true, - RollupIndex: 0, - LeafIndex: 2, - }, - ClaimData: &agglayer.ClaimFromMainnnet{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0xbbb"), - MainnetExitRoot: common.HexToHash("0xccc"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafMER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xccc"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - }, - { - name: "No claims", - claims: []bridgesync.Claim{}, - expectedError: false, - expectedExits: []*agglayer.ImportedBridgeExit{}, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggSender := &AggSender{ - l1infoTreeSyncer: mockL1InfoTreeSyncer, - log: log.WithFields("test", "unittest"), - } - exits, err := aggSender.getImportedBridgeExits(context.Background(), tt.claims) - - if tt.expectedError { - require.Error(t, err) - require.Nil(t, exits) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedExits, exits) - } - }) - } -} - -func TestBuildCertificate(t *testing.T) { - mockL2BridgeSyncer := mocks.NewL2BridgeSyncer(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - mockProof := generateTestProof(t) - - tests := []struct { - name string - bridges []bridgesync.Bridge - claims []bridgesync.Claim - lastSentCertificateInfo aggsendertypes.CertificateInfo - fromBlock uint64 - toBlock uint64 - mockFn func() - expectedCert *agglayer.Certificate - expectedError bool - }{ - { - name: "Valid certificate with bridges and claims", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - 
DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: big.NewInt(1), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - ProofRollupExitRoot: mockProof, - }, - }, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - Status: agglayer.Settled, - }, - fromBlock: 0, - toBlock: 10, - expectedCert: &agglayer.Certificate{ - NetworkID: 1, - PrevLocalExitRoot: common.HexToHash("0x123"), - NewLocalExitRoot: common.HexToHash("0x789"), - Metadata: aggsendertypes.NewCertificateMetadata(0, 10, 0).ToHash(), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x123"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ - { - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1234"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - }, - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 0, - LeafIndex: 1, - }, - ClaimData: &agglayer.ClaimFromRollup{ - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - 
RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x7891"), - Timestamp: 123456789, - BlockHash: common.HexToHash("0xabc"), - }, - }, - ProofLeafLER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xc52019815b51acf67a715cae6794a20083d63fd9af45783b7adf69123dae92c8"), - Proof: mockProof, - }, - ProofLERToRER: &agglayer.MerkleProof{ - Root: common.HexToHash("0xaaab"), - Proof: mockProof, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x7891"), - Proof: mockProof, - }, - }, - }, - }, - Height: 2, - }, - mockFn: func() { - mockL2BridgeSyncer.On("OriginNetwork").Return(uint32(1)) - mockL2BridgeSyncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x789")}, nil) - - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything).Return(mockProof, nil) - }, - expectedError: false, - }, - { - name: "No bridges or claims", - bridges: []bridgesync.Bridge{}, - claims: []bridgesync.Claim{}, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - }, - expectedCert: nil, - expectedError: true, - }, - { - name: "Error getting imported bridge exits", - bridges: []bridgesync.Bridge{ - { - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x123"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x456"), - Amount: 
big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, - claims: []bridgesync.Claim{ - { - IsMessage: false, - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1234"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x4567"), - Amount: big.NewInt(111), - Metadata: []byte("metadata1"), - GlobalIndex: new(big.Int).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), - GlobalExitRoot: common.HexToHash("0x7891"), - RollupExitRoot: common.HexToHash("0xaaab"), - MainnetExitRoot: common.HexToHash("0xbbba"), - ProofLocalExitRoot: mockProof, - }, - }, - lastSentCertificateInfo: aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - }, - mockFn: func() { - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - Timestamp: 123456789, - PreviousBlockHash: common.HexToHash("0xabc"), - GlobalExitRoot: common.HexToHash("0x7891"), - }, nil) - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return( - treeTypes.Root{Hash: common.HexToHash("0x7891")}, nil) - }, - expectedCert: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - mockL1InfoTreeSyncer.ExpectedCalls = nil - mockL2BridgeSyncer.ExpectedCalls = nil - - if tt.mockFn != nil { - tt.mockFn() - } - - aggSender := &AggSender{ - l2Syncer: mockL2BridgeSyncer, - l1infoTreeSyncer: mockL1InfoTreeSyncer, - log: log.WithFields("test", "unittest"), - } - - certParam := &aggsendertypes.CertificateBuildParams{ - ToBlock: tt.toBlock, - Bridges: tt.bridges, - Claims: tt.claims, - } - cert, err := aggSender.buildCertificate(context.Background(), certParam, &tt.lastSentCertificateInfo) - - if tt.expectedError { - require.Error(t, err) - require.Nil(t, cert) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedCert, cert) - } - }) - } -} - -func generateTestProof(t 
*testing.T) treeTypes.Proof { - t.Helper() - - proof := treeTypes.Proof{} - - for i := 0; i < int(treeTypes.DefaultHeight) && i < 10; i++ { - proof[i] = common.HexToHash(fmt.Sprintf("0x%d", i)) - } - - return proof -} - -func TestCheckIfCertificatesAreSettled(t *testing.T) { - tests := []struct { - name string - pendingCertificates []*aggsendertypes.CertificateInfo - certificateHeaders map[common.Hash]*agglayer.CertificateHeader - getFromDBError error - clientError error - updateDBError error - expectedErrorLogMessages []string - expectedInfoMessages []string - expectedError bool - }{ - { - name: "All certificates settled - update successful", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - {CertificateID: common.HexToHash("0x2"), Height: 2}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.Settled}, - common.HexToHash("0x2"): {Status: agglayer.Settled}, - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - }, - { - name: "Some certificates in error - update successful", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - {CertificateID: common.HexToHash("0x2"), Height: 2}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.InError}, - common.HexToHash("0x2"): {Status: agglayer.Settled}, - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - }, - { - name: "Error getting pending certificates", - getFromDBError: fmt.Errorf("storage error"), - expectedErrorLogMessages: []string{ - "error getting pending certificates: %w", - }, - expectedError: true, - }, - { - name: "Error getting certificate header", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - }, - certificateHeaders: 
map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.InError}, - }, - clientError: fmt.Errorf("client error"), - expectedErrorLogMessages: []string{ - "error getting header of certificate %s with height: %d from agglayer: %w", - }, - expectedError: true, - }, - { - name: "Error updating certificate status", - pendingCertificates: []*aggsendertypes.CertificateInfo{ - {CertificateID: common.HexToHash("0x1"), Height: 1}, - }, - certificateHeaders: map[common.Hash]*agglayer.CertificateHeader{ - common.HexToHash("0x1"): {Status: agglayer.Settled}, - }, - updateDBError: fmt.Errorf("update error"), - expectedErrorLogMessages: []string{ - "error updating certificate status in storage: %w", - }, - expectedInfoMessages: []string{ - "certificate %s changed status to %s", - }, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - mockStorage := mocks.NewAggSenderStorage(t) - mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockLogger := log.WithFields("test", "unittest") - - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return( - tt.pendingCertificates, tt.getFromDBError) - for certID, header := range tt.certificateHeaders { - mockAggLayerClient.On("GetCertificateHeader", certID).Return(header, tt.clientError) - } - if tt.updateDBError != nil { - mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(tt.updateDBError) - } else if tt.clientError == nil && tt.getFromDBError == nil { - mockStorage.On("UpdateCertificate", mock.Anything, mock.Anything).Return(nil) - } - - aggSender := &AggSender{ - log: mockLogger, - storage: mockStorage, - aggLayerClient: mockAggLayerClient, - cfg: Config{}, - } - - ctx := context.TODO() - thereArePendingCerts := aggSender.checkPendingCertificatesStatus(ctx) - require.Equal(t, tt.expectedError, thereArePendingCerts) - mockAggLayerClient.AssertExpectations(t) - mockStorage.AssertExpectations(t) - }) 
- } -} - -func TestSendCertificate(t *testing.T) { - t.Parallel() - - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - - type testCfg struct { - name string - sequencerKey *ecdsa.PrivateKey - shouldSendCertificate []interface{} - getLastSentCertificate []interface{} - lastL2BlockProcessed []interface{} - getBridges []interface{} - getClaims []interface{} - getInfoByGlobalExitRoot []interface{} - getL1InfoTreeRootByIndex []interface{} - getL1InfoTreeMerkleProofFromIndexToRoot []interface{} - getExitRootByIndex []interface{} - originNetwork []interface{} - sendCertificate []interface{} - saveLastSentCertificate []interface{} - expectedError string - } - - setupTest := func(cfg testCfg) (*AggSender, *mocks.AggSenderStorage, *mocks.L2BridgeSyncer, - *agglayer.AgglayerClientMock, *mocks.L1InfoTreeSyncer) { - var ( - aggsender = &AggSender{ - log: log.WithFields("aggsender", 1), - cfg: Config{MaxRetriesStoreCertificate: 1}, - sequencerKey: cfg.sequencerKey, - } - mockStorage *mocks.AggSenderStorage - mockL2Syncer *mocks.L2BridgeSyncer - mockAggLayerClient *agglayer.AgglayerClientMock - mockL1InfoTreeSyncer *mocks.L1InfoTreeSyncer - ) - - if cfg.shouldSendCertificate != nil || cfg.getLastSentCertificate != nil || - cfg.saveLastSentCertificate != nil { - mockStorage = mocks.NewAggSenderStorage(t) - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses). - Return(cfg.shouldSendCertificate...) - - aggsender.storage = mockStorage - - if cfg.getLastSentCertificate != nil { - mockStorage.On("GetLastSentCertificate").Return(cfg.getLastSentCertificate...).Once() - } - - if cfg.saveLastSentCertificate != nil { - mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(cfg.saveLastSentCertificate...) 
- } - } - - if cfg.lastL2BlockProcessed != nil || cfg.originNetwork != nil || - cfg.getBridges != nil || cfg.getClaims != nil || cfg.getInfoByGlobalExitRoot != nil { - mockL2Syncer = mocks.NewL2BridgeSyncer(t) - - mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(cfg.lastL2BlockProcessed...).Once() - - if cfg.getBridges != nil { - mockL2Syncer.On("GetBridgesPublished", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getBridges...) - } - - if cfg.getClaims != nil { - mockL2Syncer.On("GetClaims", mock.Anything, mock.Anything, mock.Anything).Return(cfg.getClaims...).Once() - } - - if cfg.getExitRootByIndex != nil { - mockL2Syncer.On("GetExitRootByIndex", mock.Anything, mock.Anything).Return(cfg.getExitRootByIndex...).Once() - } - - if cfg.originNetwork != nil { - mockL2Syncer.On("OriginNetwork").Return(cfg.originNetwork...).Once() - } - - aggsender.l2Syncer = mockL2Syncer - } - - if cfg.sendCertificate != nil { - mockAggLayerClient = agglayer.NewAgglayerClientMock(t) - mockAggLayerClient.On("SendCertificate", mock.Anything).Return(cfg.sendCertificate...).Once() - - aggsender.aggLayerClient = mockAggLayerClient - } - - if cfg.getInfoByGlobalExitRoot != nil || - cfg.getL1InfoTreeRootByIndex != nil || cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { - mockL1InfoTreeSyncer = mocks.NewL1InfoTreeSyncer(t) - mockL1InfoTreeSyncer.On("GetInfoByGlobalExitRoot", mock.Anything).Return(cfg.getInfoByGlobalExitRoot...).Once() - - if cfg.getL1InfoTreeRootByIndex != nil { - mockL1InfoTreeSyncer.On("GetL1InfoTreeRootByIndex", mock.Anything, mock.Anything).Return(cfg.getL1InfoTreeRootByIndex...).Once() - } - - if cfg.getL1InfoTreeMerkleProofFromIndexToRoot != nil { - mockL1InfoTreeSyncer.On("GetL1InfoTreeMerkleProofFromIndexToRoot", mock.Anything, mock.Anything, mock.Anything). 
- Return(cfg.getL1InfoTreeMerkleProofFromIndexToRoot...).Once() - } - - aggsender.l1infoTreeSyncer = mockL1InfoTreeSyncer - } - - return aggsender, mockStorage, mockL2Syncer, mockAggLayerClient, mockL1InfoTreeSyncer - } - - tests := []testCfg{ - { - name: "error getting pending certificates", - shouldSendCertificate: []interface{}{nil, errors.New("error getting pending")}, - expectedError: "error getting pending", - }, - { - name: "should not send certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{ - {Status: agglayer.Pending}, - }, nil}, - }, - { - name: "error getting last sent certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(8), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{}, errors.New("error getting last sent certificate")}, - expectedError: "error getting last sent certificate", - }, - { - name: "no new blocks to send certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(41), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 41, - CertificateID: common.HexToHash("0x111"), - NewLocalExitRoot: common.HexToHash("0x13223"), - FromBlock: 31, - ToBlock: 41, - }, nil}, - }, - { - name: "get bridges error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(59), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 50, - CertificateID: common.HexToHash("0x1111"), - NewLocalExitRoot: common.HexToHash("0x132233"), - FromBlock: 40, - ToBlock: 41, - }, nil}, - getBridges: []interface{}{nil, errors.New("error getting bridges")}, - expectedError: "error getting bridges", - }, - { - name: "no bridges", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, 
- lastL2BlockProcessed: []interface{}{uint64(69), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 60, - CertificateID: common.HexToHash("0x11111"), - NewLocalExitRoot: common.HexToHash("0x1322233"), - FromBlock: 50, - ToBlock: 51, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{}, nil}, - }, - { - name: "get claims error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(79), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 70, - CertificateID: common.HexToHash("0x121111"), - NewLocalExitRoot: common.HexToHash("0x13122233"), - FromBlock: 60, - ToBlock: 61, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 61, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{nil, errors.New("error getting claims")}, - expectedError: "error getting claims", - }, - { - name: "error getting info by global exit root", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{nil, errors.New("error getting info by global exit root")}, - expectedError: "error getting info by global exit root", - }, - { - name: "error getting L1 Info tree root by index", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, 
- lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - BlockNumber: 1, - BlockPosition: 0, - PreviousBlockHash: common.HexToHash("0x123"), - Timestamp: 123456789, - MainnetExitRoot: common.HexToHash("0xccc"), - RollupExitRoot: common.HexToHash("0xddd"), - GlobalExitRoot: common.HexToHash("0xeee"), - }, nil}, - getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{}, errors.New("error getting L1 Info tree root by index")}, - expectedError: "error getting L1 Info tree root by index", - }, - { - name: "error getting L1 Info tree merkle proof from index to root", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(89), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 80, - CertificateID: common.HexToHash("0x1321111"), - NewLocalExitRoot: common.HexToHash("0x131122233"), - FromBlock: 70, - ToBlock: 71, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 71, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{ - { - IsMessage: false, - GlobalIndex: big.NewInt(1), - }, - }, nil}, - getInfoByGlobalExitRoot: []interface{}{&l1infotreesync.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - BlockNumber: 1, - BlockPosition: 0, - PreviousBlockHash: common.HexToHash("0x123"), - Timestamp: 
123456789, - MainnetExitRoot: common.HexToHash("0xccc"), - RollupExitRoot: common.HexToHash("0xddd"), - GlobalExitRoot: common.HexToHash("0xeee"), - }, nil}, - getL1InfoTreeRootByIndex: []interface{}{treeTypes.Root{Hash: common.HexToHash("0xeee")}, nil}, - getL1InfoTreeMerkleProofFromIndexToRoot: []interface{}{treeTypes.Proof{}, errors.New("error getting L1 Info tree merkle proof")}, - expectedError: "error getting L1 Info tree merkle proof for leaf index", - }, - { - name: "send certificate error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(99), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 90, - CertificateID: common.HexToHash("0x1121111"), - NewLocalExitRoot: common.HexToHash("0x111122211"), - PreviousLocalExitRoot: &ler1, - FromBlock: 80, - ToBlock: 81, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 81, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, errors.New("error sending certificate")}, - sequencerKey: privateKey, - expectedError: "error sending certificate", - }, - { - name: "store last sent certificate error", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(109), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 100, - CertificateID: common.HexToHash("0x11121111"), - NewLocalExitRoot: common.HexToHash("0x1211122211"), - FromBlock: 90, - ToBlock: 91, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 91, - BlockPos: 0, - LeafType: 
agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, nil}, - saveLastSentCertificate: []interface{}{errors.New("error saving last sent certificate in db")}, - sequencerKey: privateKey, - expectedError: "error saving last sent certificate in db", - }, - { - name: "successful sending of certificate", - shouldSendCertificate: []interface{}{[]*aggsendertypes.CertificateInfo{}, nil}, - lastL2BlockProcessed: []interface{}{uint64(119), nil}, - getLastSentCertificate: []interface{}{&aggsendertypes.CertificateInfo{ - Height: 110, - CertificateID: common.HexToHash("0x12121111"), - NewLocalExitRoot: common.HexToHash("0x1221122211"), - FromBlock: 100, - ToBlock: 101, - Status: agglayer.Settled, - }, nil}, - getBridges: []interface{}{[]bridgesync.Bridge{ - { - BlockNum: 101, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - DepositCount: 1, - }, - }, nil}, - getClaims: []interface{}{[]bridgesync.Claim{}, nil}, - getExitRootByIndex: []interface{}{treeTypes.Root{}, nil}, - originNetwork: []interface{}{uint32(1), nil}, - sendCertificate: []interface{}{common.Hash{}, nil}, - saveLastSentCertificate: []interface{}{nil}, - sequencerKey: privateKey, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - aggsender, mockStorage, mockL2Syncer, - mockAggLayerClient, mockL1InfoTreeSyncer := setupTest(tt) - - _, err := aggsender.sendCertificate(context.Background()) - - if tt.expectedError != "" { - require.ErrorContains(t, err, tt.expectedError) - } else { - require.NoError(t, err) - } - - if mockStorage != nil { - mockStorage.AssertExpectations(t) - } - - if mockL2Syncer != nil { - mockL2Syncer.AssertExpectations(t) - } - - if mockAggLayerClient != nil { - 
mockAggLayerClient.AssertExpectations(t) - } - - if mockL1InfoTreeSyncer != nil { - mockL1InfoTreeSyncer.AssertExpectations(t) - } - }) - } -} - -func TestExtractSignatureData(t *testing.T) { - t.Parallel() - - testR := common.HexToHash("0x1") - testV := common.HexToHash("0x2") - - tests := []struct { - name string - signature []byte - expectedR common.Hash - expectedS common.Hash - expectedOddParity bool - expectedError error - }{ - { - name: "Valid signature - odd parity", - signature: append(append(testR.Bytes(), testV.Bytes()...), 1), - expectedR: testR, - expectedS: testV, - expectedOddParity: true, - expectedError: nil, - }, - { - name: "Valid signature - even parity", - signature: append(append(testR.Bytes(), testV.Bytes()...), 2), - expectedR: testR, - expectedS: testV, - expectedOddParity: false, - expectedError: nil, - }, - { - name: "Invalid signature size", - signature: make([]byte, 64), // Invalid size - expectedError: errInvalidSignatureSize, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - r, s, isOddParity, err := extractSignatureData(tt.signature) - - if tt.expectedError != nil { - require.Error(t, err) - require.Equal(t, tt.expectedError, err) - } else { - require.NoError(t, err) - require.Equal(t, tt.expectedR, r) - require.Equal(t, tt.expectedS, s) - require.Equal(t, tt.expectedOddParity, isOddParity) - } - }) - } -} - -func TestExploratoryGenerateCert(t *testing.T) { - t.Skip("This test is only for exploratory purposes, to generate json format of the certificate") - - key, err := crypto.GenerateKey() - require.NoError(t, err) - - signature, err := crypto.Sign(common.HexToHash("0x1").Bytes(), key) - require.NoError(t, err) - - r, s, v, err := extractSignatureData(signature) - require.NoError(t, err) - - certificate := &agglayer.SignedCertificate{ - Certificate: &agglayer.Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.HexToHash("0x1"), - NewLocalExitRoot: 
common.HexToHash("0x2"), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x11"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x22"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: []*agglayer.ImportedBridgeExit{ - { - GlobalIndex: &agglayer.GlobalIndex{ - MainnetFlag: false, - RollupIndex: 1, - LeafIndex: 11, - }, - BridgeExit: &agglayer.BridgeExit{ - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x11"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x22"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - ClaimData: &agglayer.ClaimFromMainnnet{ - ProofLeafMER: &agglayer.MerkleProof{ - Root: common.HexToHash("0x1"), - Proof: [32]common.Hash{}, - }, - ProofGERToL1Root: &agglayer.MerkleProof{ - Root: common.HexToHash("0x3"), - Proof: [32]common.Hash{}, - }, - L1Leaf: &agglayer.L1InfoTreeLeaf{ - L1InfoTreeIndex: 1, - RollupExitRoot: common.HexToHash("0x4"), - MainnetExitRoot: common.HexToHash("0x5"), - Inner: &agglayer.L1InfoTreeLeafInner{ - GlobalExitRoot: common.HexToHash("0x6"), - BlockHash: common.HexToHash("0x7"), - Timestamp: 1231, - }, - }, - }, - }, - }, - }, - Signature: &agglayer.Signature{ - R: r, - S: s, - OddParity: v, - }, - } - - file, err := os.Create("test.json") - require.NoError(t, err) - - defer file.Close() - - encoder := json.NewEncoder(file) - encoder.SetIndent("", " ") - require.NoError(t, encoder.Encode(certificate)) -} - -func TestGetNextHeightAndPreviousLER(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - lastSentCertificateInfo *aggsendertypes.CertificateInfo - lastSettleCertificateInfoCall bool - lastSettleCertificateInfo *aggsendertypes.CertificateInfo - lastSettleCertificateInfoError error - 
expectedHeight uint64 - expectedPreviousLER common.Hash - expectedError bool - }{ - { - name: "Normal case", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.Settled, - }, - expectedHeight: 11, - expectedPreviousLER: common.HexToHash("0x123"), - }, - { - name: "First certificate", - lastSentCertificateInfo: nil, - expectedHeight: 0, - expectedPreviousLER: zeroLER, - }, - { - name: "First certificate error, with prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 0, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - PreviousLocalExitRoot: &ler1, - }, - expectedHeight: 0, - expectedPreviousLER: ler1, - }, - { - name: "First certificate error, no prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 0, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - expectedHeight: 0, - expectedPreviousLER: zeroLER, - }, - { - name: "n certificate error, prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - PreviousLocalExitRoot: &ler1, - Status: agglayer.InError, - }, - expectedHeight: 10, - expectedPreviousLER: ler1, - }, - { - name: "last cert not closed, error", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - PreviousLocalExitRoot: &ler1, - Status: agglayer.Pending, - }, - expectedHeight: 10, - expectedPreviousLER: ler1, - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 9, - NewLocalExitRoot: common.HexToHash("0x3456"), - Status: agglayer.Settled, - }, - expectedHeight: 
10, - expectedPreviousLER: common.HexToHash("0x3456"), - }, - { - name: "Previous certificate in error, no prevLER. Error getting previous cert", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: nil, - lastSettleCertificateInfoError: errors.New("error getting last settle certificate"), - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER. prev cert not available on storage", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfoCall: true, - lastSettleCertificateInfo: nil, - lastSettleCertificateInfoError: nil, - expectedError: true, - }, - { - name: "Previous certificate in error, no prevLER. prev cert not available on storage", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 10, - NewLocalExitRoot: common.HexToHash("0x123"), - Status: agglayer.InError, - }, - lastSettleCertificateInfo: &aggsendertypes.CertificateInfo{ - Height: 9, - NewLocalExitRoot: common.HexToHash("0x3456"), - Status: agglayer.InError, - }, - lastSettleCertificateInfoError: nil, - expectedError: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - storageMock := mocks.NewAggSenderStorage(t) - aggSender := &AggSender{log: log.WithFields("aggsender-test", "getNextHeightAndPreviousLER"), storage: storageMock} - if tt.lastSettleCertificateInfoCall || tt.lastSettleCertificateInfo != nil || tt.lastSettleCertificateInfoError != nil { - storageMock.EXPECT().GetCertificateByHeight(mock.Anything).Return(tt.lastSettleCertificateInfo, tt.lastSettleCertificateInfoError).Once() - } - height, previousLER, err := aggSender.getNextHeightAndPreviousLER(tt.lastSentCertificateInfo) - if tt.expectedError { - require.Error(t, err) - } else { - 
require.NoError(t, err) - require.Equal(t, tt.expectedHeight, height) - require.Equal(t, tt.expectedPreviousLER, previousLER) - } - }) - } -} - -func TestSendCertificate_NoClaims(t *testing.T) { - privateKey, err := crypto.GenerateKey() - require.NoError(t, err) - - ctx := context.Background() - mockStorage := mocks.NewAggSenderStorage(t) - mockL2Syncer := mocks.NewL2BridgeSyncer(t) - mockAggLayerClient := agglayer.NewAgglayerClientMock(t) - mockL1InfoTreeSyncer := mocks.NewL1InfoTreeSyncer(t) - - aggSender := &AggSender{ - log: log.WithFields("aggsender-test", "no claims test"), - storage: mockStorage, - l2Syncer: mockL2Syncer, - aggLayerClient: mockAggLayerClient, - l1infoTreeSyncer: mockL1InfoTreeSyncer, - sequencerKey: privateKey, - cfg: Config{}, - } - - mockStorage.On("GetCertificatesByStatus", agglayer.NonSettledStatuses).Return([]*aggsendertypes.CertificateInfo{}, nil).Once() - mockStorage.On("GetLastSentCertificate").Return(&aggsendertypes.CertificateInfo{ - NewLocalExitRoot: common.HexToHash("0x123"), - Height: 1, - FromBlock: 0, - ToBlock: 10, - Status: agglayer.Settled, - }, nil).Once() - mockStorage.On("SaveLastSentCertificate", mock.Anything, mock.Anything).Return(nil).Once() - mockL2Syncer.On("GetLastProcessedBlock", mock.Anything).Return(uint64(50), nil) - mockL2Syncer.On("GetBridgesPublished", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Bridge{ - { - BlockNum: 30, - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("0x1"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - DepositCount: 1, - }, - }, nil) - mockL2Syncer.On("GetClaims", mock.Anything, uint64(11), uint64(50)).Return([]bridgesync.Claim{}, nil) - mockL2Syncer.On("GetExitRootByIndex", mock.Anything, uint32(1)).Return(treeTypes.Root{}, nil).Once() - mockL2Syncer.On("OriginNetwork").Return(uint32(1), nil).Once() - 
mockAggLayerClient.On("SendCertificate", mock.Anything).Return(common.Hash{}, nil).Once() - - signedCertificate, err := aggSender.sendCertificate(ctx) - require.NoError(t, err) - require.NotNil(t, signedCertificate) - require.NotNil(t, signedCertificate.Signature) - require.NotNil(t, signedCertificate.Certificate) - require.NotNil(t, signedCertificate.Certificate.ImportedBridgeExits) - require.Len(t, signedCertificate.Certificate.BridgeExits, 1) - - mockStorage.AssertExpectations(t) - mockL2Syncer.AssertExpectations(t) - mockAggLayerClient.AssertExpectations(t) - mockL1InfoTreeSyncer.AssertExpectations(t) -} - -func TestExtractFromCertificateMetadataToBlock(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - metadata common.Hash - expected aggsendertypes.CertificateMetadata - }{ - { - name: "Valid metadata", - metadata: aggsendertypes.NewCertificateMetadata(0, 1000, 123567890).ToHash(), - expected: aggsendertypes.CertificateMetadata{ - Version: 1, - FromBlock: 0, - Offset: 1000, - CreatedAt: 123567890, - }, - }, - { - name: "Zero metadata", - metadata: aggsendertypes.NewCertificateMetadata(0, 0, 0).ToHash(), - expected: aggsendertypes.CertificateMetadata{ - Version: 1, - FromBlock: 0, - Offset: 0, - CreatedAt: 0, - }, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - result := *aggsendertypes.NewCertificateMetadataFromHash(tt.metadata) - require.Equal(t, tt.expected, result) - }) - } -} - -func TestCheckLastCertificateFromAgglayer_ErrorAggLayer(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, fmt.Errorf("unittest error")).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -func 
TestCheckLastCertificateFromAgglayer_ErrorStorageGetLastSentCertificate(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, fmt.Errorf("unittest error")) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// TestCheckLastCertificateFromAgglayer_Case1NoCerts -// CASE 1: No certificates in local storage and agglayer -// Aggsender and agglayer are empty so it's ok -func TestCheckLastCertificateFromAgglayer_Case1NoCerts(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagNone) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest).Return(nil, nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote -// CASE 2: No certificates in local storage but agglayer has one -// The local DB is empty and we set the lastCert reported by AggLayer -func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemote(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagNone) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) - localCert, err := testData.sut.storage.GetLastSentCertificate() - require.NoError(t, err) - require.Equal(t, testData.testCerts[0].CertificateID, localCert.CertificateID) -} - -// TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage -// sub case of previous one that fails to update local storage -func TestCheckLastCertificateFromAgglayer_Case2NoCertLocalCertRemoteErrorStorage(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(nil, nil) - testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(errTest).Once() - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 2.1: certificate in storage but not in agglayer -// sub case of previous one that fails to update local storage -func TestCheckLastCertificateFromAgglayer_Case2_1NoCertRemoteButCertLocal(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(nil, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 3.1: the certificate on the agglayer has less height than the one stored in the local storage -func TestCheckLastCertificateFromAgglayer_Case3_1LessHeight(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.ErrorContains(t, err, "recovery: the last certificate in the agglayer has less height (1) than the one in the local storage (2)") -} - -// CASE 3.2: AggSender and AggLayer not same height. AggLayer has a new certificate -func TestCheckLastCertificateFromAgglayer_Case3_2Mismatch(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(certInfoToCertHeader(t, &testData.testCerts[1], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().SaveLastSentCertificate(mock.Anything, mock.Anything).Return(nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 4: AggSender and AggLayer not same certificateID -func TestCheckLastCertificateFromAgglayer_Case4Mismatch(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[1], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -// CASE 5: AggSender and AggLayer same certificateID and same status -func TestCheckLastCertificateFromAgglayer_Case5SameStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest), nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 5: AggSender and AggLayer same certificateID and differ on status -func TestCheckLastCertificateFromAgglayer_Case5UpdateStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - aggLayerCert := certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest) - aggLayerCert.Status = agglayer.Settled - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). - Return(aggLayerCert, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(nil).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.NoError(t, err) -} - -// CASE 4: AggSender and AggLayer same certificateID and differ on status but fails update -func TestCheckLastCertificateFromAgglayer_Case4ErrorUpdateStatus(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.l2syncerMock.EXPECT().OriginNetwork().Return(networkIDTest).Once() - aggLayerCert := certInfoToCertHeader(t, &testData.testCerts[0], networkIDTest) - aggLayerCert.Status = agglayer.Settled - testData.agglayerClientMock.EXPECT().GetLatestKnownCertificateHeader(networkIDTest). 
- Return(aggLayerCert, nil).Once() - testData.storageMock.EXPECT().GetLastSentCertificate().Return(&testData.testCerts[0], nil) - testData.storageMock.EXPECT().UpdateCertificate(mock.Anything, mock.Anything).Return(errTest).Once() - - err := testData.sut.checkLastCertificateFromAgglayer(testData.ctx) - - require.Error(t, err) -} - -func TestLimitSize_FirstOneFit(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 1, []uint64{1}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, certParams, newCert) -} - -func TestLimitSize_FirstMinusOneFit(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 3) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 0, []uint64{19, 19, 19, 20}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(19), newCert.ToBlock) -} - -func TestLimitSize_NoWayToFitInMaxSize(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 2) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: uint64(1), - ToBlock: uint64(20), - Bridges: NewBridgesData(t, 0, []uint64{19, 19, 19, 20}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(19), newCert.ToBlock) -} - -func TestLimitSize_MinNumBlocks(t *testing.T) { - testData := newAggsenderTestData(t, testDataFlagMockStorage) - testData.sut.cfg.MaxCertSize = (aggsendertypes.EstimatedSizeBridgeExit * 2) + 1 - certParams := &aggsendertypes.CertificateBuildParams{ - FromBlock: 
uint64(1), - ToBlock: uint64(2), - Bridges: NewBridgesData(t, 0, []uint64{1, 1, 1, 2, 2, 2}), - } - newCert, err := testData.sut.limitCertSize(certParams) - require.NoError(t, err) - require.Equal(t, uint64(1), newCert.ToBlock) -} - -func TestGetLastSentBlockAndRetryCount(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - lastSentCertificateInfo *aggsendertypes.CertificateInfo - expectedBlock uint64 - expectedRetryCount int - }{ - { - name: "No last sent certificate", - lastSentCertificateInfo: nil, - expectedBlock: 0, - expectedRetryCount: 0, - }, - { - name: "Last sent certificate with no error", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - ToBlock: 10, - Status: agglayer.Settled, - }, - expectedBlock: 10, - expectedRetryCount: 0, - }, - { - name: "Last sent certificate with error and non-zero FromBlock", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - FromBlock: 5, - ToBlock: 10, - Status: agglayer.InError, - RetryCount: 1, - }, - expectedBlock: 4, - expectedRetryCount: 2, - }, - { - name: "Last sent certificate with error and zero FromBlock", - lastSentCertificateInfo: &aggsendertypes.CertificateInfo{ - FromBlock: 0, - ToBlock: 10, - Status: agglayer.InError, - RetryCount: 1, - }, - expectedBlock: 10, - expectedRetryCount: 2, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - block, retryCount := getLastSentBlockAndRetryCount(tt.lastSentCertificateInfo) - - require.Equal(t, tt.expectedBlock, block) - require.Equal(t, tt.expectedRetryCount, retryCount) - }) - } -} - -type testDataFlags = int - -const ( - testDataFlagNone testDataFlags = 0 - testDataFlagMockStorage testDataFlags = 1 -) - -type aggsenderTestData struct { - ctx context.Context - agglayerClientMock *agglayer.AgglayerClientMock - l2syncerMock *mocks.L2BridgeSyncer - l1InfoTreeSyncerMock *mocks.L1InfoTreeSyncer - storageMock *mocks.AggSenderStorage - sut *AggSender - testCerts 
[]aggsendertypes.CertificateInfo -} - -func NewBridgesData(t *testing.T, num int, blockNum []uint64) []bridgesync.Bridge { - t.Helper() - if num == 0 { - num = len(blockNum) - } - res := make([]bridgesync.Bridge, 0) - for i := 0; i < num; i++ { - res = append(res, bridgesync.Bridge{ - BlockNum: blockNum[i%len(blockNum)], - BlockPos: 0, - LeafType: agglayer.LeafTypeAsset.Uint8(), - OriginNetwork: 1, - }) - } - return res -} - -func NewClaimData(t *testing.T, num int, blockNum []uint64) []bridgesync.Claim { - t.Helper() - if num == 0 { - num = len(blockNum) - } - res := make([]bridgesync.Claim, 0) - for i := 0; i < num; i++ { - res = append(res, bridgesync.Claim{ - BlockNum: blockNum[i%len(blockNum)], - BlockPos: 0, - }) - } - return res -} - -func certInfoToCertHeader(t *testing.T, certInfo *aggsendertypes.CertificateInfo, networkID uint32) *agglayer.CertificateHeader { - t.Helper() - if certInfo == nil { - return nil - } - return &agglayer.CertificateHeader{ - Height: certInfo.Height, - NetworkID: networkID, - CertificateID: certInfo.CertificateID, - NewLocalExitRoot: certInfo.NewLocalExitRoot, - Status: agglayer.Pending, - Metadata: aggsendertypes.NewCertificateMetadata( - certInfo.FromBlock, - uint32(certInfo.FromBlock-certInfo.ToBlock), - certInfo.CreatedAt, - ).ToHash(), - } -} - -func newAggsenderTestData(t *testing.T, creationFlags testDataFlags) *aggsenderTestData { - t.Helper() - l2syncerMock := mocks.NewL2BridgeSyncer(t) - agglayerClientMock := agglayer.NewAgglayerClientMock(t) - l1InfoTreeSyncerMock := mocks.NewL1InfoTreeSyncer(t) - logger := log.WithFields("aggsender-test", "checkLastCertificateFromAgglayer") - var storageMock *mocks.AggSenderStorage - var storage db.AggSenderStorage - var err error - if creationFlags&testDataFlagMockStorage != 0 { - storageMock = mocks.NewAggSenderStorage(t) - storage = storageMock - } else { - dbPath := path.Join(t.TempDir(), "newAggsenderTestData.sqlite") - storageConfig := db.AggSenderSQLStorageConfig{ - DBPath: 
dbPath, - KeepCertificatesHistory: true, - } - storage, err = db.NewAggSenderSQLStorage(logger, storageConfig) - require.NoError(t, err) - } - - ctx := context.TODO() - sut := &AggSender{ - log: logger, - l2Syncer: l2syncerMock, - aggLayerClient: agglayerClientMock, - storage: storage, - l1infoTreeSyncer: l1InfoTreeSyncerMock, - cfg: Config{ - MaxCertSize: 1024 * 1024, - }, - } - testCerts := []aggsendertypes.CertificateInfo{ - { - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - Status: agglayer.Pending, - }, - { - Height: 2, - CertificateID: common.HexToHash("0x1a111"), - NewLocalExitRoot: common.HexToHash("0x2a2"), - Status: agglayer.Pending, - }, - } - - return &aggsenderTestData{ - ctx: ctx, - agglayerClientMock: agglayerClientMock, - l2syncerMock: l2syncerMock, - l1InfoTreeSyncerMock: l1InfoTreeSyncerMock, - storageMock: storageMock, - sut: sut, - testCerts: testCerts, - } -} diff --git a/aggsender/block_notifier_polling.go b/aggsender/block_notifier_polling.go deleted file mode 100644 index dce860e85..000000000 --- a/aggsender/block_notifier_polling.go +++ /dev/null @@ -1,228 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "math/big" - "sync" - "time" - - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" -) - -var ( - timeNowFunc = time.Now -) - -const ( - AutomaticBlockInterval = time.Second * 0 - // minBlockInterval is the minimum interval at which the AggSender will check for new blocks - minBlockInterval = time.Second - // maxBlockInterval is the maximum interval at which the AggSender will check for new blocks - maxBlockInterval = time.Minute - // Percentage period of reach the next block - percentForNextBlock = 80 -) - -type ConfigBlockNotifierPolling struct { - // BlockFinalityType is the finality of the block to be notified - BlockFinalityType etherman.BlockNumberFinality - // CheckNewBlockInterval is the interval at which the AggSender will check for new 
blocks - // if is 0 it will be calculated automatically - CheckNewBlockInterval time.Duration -} - -type BlockNotifierPolling struct { - ethClient types.EthClient - blockFinality *big.Int - logger types.Logger - config ConfigBlockNotifierPolling - mu sync.Mutex - lastStatus *blockNotifierPollingInternalStatus - types.GenericSubscriber[types.EventNewBlock] -} - -// NewBlockNotifierPolling creates a new BlockNotifierPolling. -// if param `subscriber` is nil a new GenericSubscriberImpl[types.EventNewBlock] will be created. -// To use this class you need to subscribe and each time that a new block appear the subscriber -// will be notified through the channel. (check unit tests TestExploratoryBlockNotifierPolling -// for more information) -func NewBlockNotifierPolling(ethClient types.EthClient, - config ConfigBlockNotifierPolling, - logger types.Logger, - subscriber types.GenericSubscriber[types.EventNewBlock]) (*BlockNotifierPolling, error) { - if subscriber == nil { - subscriber = NewGenericSubscriberImpl[types.EventNewBlock]() - } - finality, err := config.BlockFinalityType.ToBlockNum() - if err != nil { - return nil, fmt.Errorf("failed to convert block finality type to block number: %w", err) - } - - return &BlockNotifierPolling{ - ethClient: ethClient, - blockFinality: finality, - logger: logger, - config: config, - GenericSubscriber: subscriber, - }, nil -} - -func (b *BlockNotifierPolling) String() string { - status := b.getGlobalStatus() - res := fmt.Sprintf("BlockNotifierPolling: finality=%s", b.config.BlockFinalityType) - if status != nil { - res += fmt.Sprintf(" lastBlockSeen=%d", status.lastBlockSeen) - } else { - res += " lastBlockSeen=none" - } - return res -} - -// Start starts the BlockNotifierPolling blocking the current goroutine -func (b *BlockNotifierPolling) Start(ctx context.Context) { - ticker := time.NewTimer(b.config.CheckNewBlockInterval) - defer ticker.Stop() - - var status *blockNotifierPollingInternalStatus = nil - - for { - select { - case 
<-ctx.Done(): - return - case <-ticker.C: - delay, newStatus, event := b.step(ctx, status) - status = newStatus - b.setGlobalStatus(status) - if event != nil { - b.Publish(*event) - } - ticker.Reset(delay) - } - } -} - -func (b *BlockNotifierPolling) setGlobalStatus(status *blockNotifierPollingInternalStatus) { - b.mu.Lock() - defer b.mu.Unlock() - b.lastStatus = status -} - -func (b *BlockNotifierPolling) getGlobalStatus() *blockNotifierPollingInternalStatus { - b.mu.Lock() - defer b.mu.Unlock() - if b.lastStatus == nil { - return nil - } - copyStatus := *b.lastStatus - return ©Status -} - -// step is the main function of the BlockNotifierPolling, it checks if there is a new block -// it returns: -// - the delay for the next check -// - the new status -// - the new even to emit or nil -func (b *BlockNotifierPolling) step(ctx context.Context, - previousState *blockNotifierPollingInternalStatus) (time.Duration, - *blockNotifierPollingInternalStatus, *types.EventNewBlock) { - currentBlock, err := b.ethClient.HeaderByNumber(ctx, b.blockFinality) - if err == nil && currentBlock == nil { - err = fmt.Errorf("failed to get block number: return a nil block") - } - if err != nil { - b.logger.Errorf("Failed to get block number: %v", err) - newState := previousState.clear() - return b.nextBlockRequestDelay(nil, err), newState, nil - } - if previousState == nil { - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(previousState, nil), newState, nil - } - if currentBlock.Number.Uint64() == previousState.lastBlockSeen { - // No new block, so no changes on state - return b.nextBlockRequestDelay(previousState, nil), previousState, nil - } - // New blockNumber! 
- eventToEmit := &types.EventNewBlock{ - BlockNumber: currentBlock.Number.Uint64(), - BlockFinalityType: b.config.BlockFinalityType, - } - if previousState.lastBlockSeen > currentBlock.Number.Uint64() { - b.logger.Warnf("Block number decreased [finality:%s]: %d -> %d", - b.config.BlockFinalityType, previousState.lastBlockSeen, currentBlock.Number.Uint64()) - // It start from scratch because something fails in calculation of block period - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(nil, nil), newState, eventToEmit - } - - if currentBlock.Number.Uint64()-previousState.lastBlockSeen != 1 { - b.logger.Warnf("Missed block(s) [finality:%s]: %d -> %d", - b.config.BlockFinalityType, previousState.lastBlockSeen, currentBlock.Number.Uint64()) - // It start from scratch because something fails in calculation of block period - newState := previousState.intialBlock(currentBlock.Number.Uint64()) - return b.nextBlockRequestDelay(nil, nil), newState, eventToEmit - } - newState := previousState.incommingNewBlock(currentBlock.Number.Uint64()) - b.logger.Debugf("New block seen [finality:%s]: %d. 
blockRate:%s", - b.config.BlockFinalityType, currentBlock.Number.Uint64(), newState.previousBlockTime) - eventToEmit.BlockRate = *newState.previousBlockTime - return b.nextBlockRequestDelay(newState, nil), newState, eventToEmit -} - -func (b *BlockNotifierPolling) nextBlockRequestDelay(status *blockNotifierPollingInternalStatus, - err error) time.Duration { - if b.config.CheckNewBlockInterval != AutomaticBlockInterval { - return b.config.CheckNewBlockInterval - } - // Initial stages wait the minimum interval to increas accuracy - if status == nil || status.previousBlockTime == nil { - return minBlockInterval - } - if err != nil { - // If error we wait twice the min interval - return minBlockInterval * 2 //nolint:mnd // 2 times the interval - } - // we have a previous block time so we can calculate the interval - now := timeNowFunc() - expectedTimeNextBlock := status.lastBlockTime.Add(*status.previousBlockTime) - distanceToNextBlock := expectedTimeNextBlock.Sub(now) - interval := distanceToNextBlock * percentForNextBlock / 100 //nolint:mnd // percent period for reach the next block - return max(minBlockInterval, min(maxBlockInterval, interval)) -} - -type blockNotifierPollingInternalStatus struct { - lastBlockSeen uint64 - lastBlockTime time.Time // first appear of block lastBlockSeen - previousBlockTime *time.Duration // time of the previous block to appear -} - -func (s *blockNotifierPollingInternalStatus) String() string { - if s == nil { - return "nil" - } - return fmt.Sprintf("lastBlockSeen=%d lastBlockTime=%s previousBlockTime=%s", - s.lastBlockSeen, s.lastBlockTime, s.previousBlockTime) -} - -func (s *blockNotifierPollingInternalStatus) clear() *blockNotifierPollingInternalStatus { - return &blockNotifierPollingInternalStatus{} -} - -func (s *blockNotifierPollingInternalStatus) intialBlock(block uint64) *blockNotifierPollingInternalStatus { - return &blockNotifierPollingInternalStatus{ - lastBlockSeen: block, - lastBlockTime: timeNowFunc(), - } -} - -func (s 
*blockNotifierPollingInternalStatus) incommingNewBlock(block uint64) *blockNotifierPollingInternalStatus { - now := timeNowFunc() - timePreviousBlock := now.Sub(s.lastBlockTime) - return &blockNotifierPollingInternalStatus{ - lastBlockSeen: block, - lastBlockTime: now, - previousBlockTime: &timePreviousBlock, - } -} diff --git a/aggsender/block_notifier_polling_test.go b/aggsender/block_notifier_polling_test.go deleted file mode 100644 index b4c4e6296..000000000 --- a/aggsender/block_notifier_polling_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "math/big" - "os" - "testing" - "time" - - "github.com/0xPolygon/cdk/aggsender/mocks" - aggsendertypes "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestExploratoryBlockNotifierPolling(t *testing.T) { - t.Skip() - urlRPCL1 := os.Getenv("L1URL") - fmt.Println("URL=", urlRPCL1) - ethClient, err := ethclient.Dial(urlRPCL1) - require.NoError(t, err) - - sut, errSut := NewBlockNotifierPolling(ethClient, - ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.LatestBlock, - }, log.WithFields("test", "test"), nil) - require.NoError(t, errSut) - go sut.Start(context.Background()) - ch := sut.Subscribe("test") - for block := range ch { - fmt.Println(block) - } -} - -func TestBlockNotifierPollingStep(t *testing.T) { - time0 := time.Unix(1731322117, 0) - period0 := time.Second * 10 - period0_80percent := time.Second * 8 - time1 := time0.Add(period0) - tests := []struct { - name string - previousStatus *blockNotifierPollingInternalStatus - HeaderByNumberError bool - HeaderByNumberErrorNumber uint64 - forcedTime time.Time - expectedStatus *blockNotifierPollingInternalStatus - expectedDelay time.Duration - expectedEvent 
*aggsendertypes.EventNewBlock - }{ - { - name: "initial->receive block", - previousStatus: nil, - HeaderByNumberError: false, - HeaderByNumberErrorNumber: 100, - forcedTime: time0, - expectedStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - lastBlockTime: time0, - }, - expectedDelay: minBlockInterval, - expectedEvent: nil, - }, - { - name: "received block->error", - previousStatus: nil, - HeaderByNumberError: true, - forcedTime: time0, - expectedStatus: &blockNotifierPollingInternalStatus{}, - expectedDelay: minBlockInterval, - expectedEvent: nil, - }, - - { - name: "have block period->receive new block", - previousStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - lastBlockTime: time0, - previousBlockTime: &period0, - }, - HeaderByNumberError: false, - HeaderByNumberErrorNumber: 101, - forcedTime: time1, - expectedStatus: &blockNotifierPollingInternalStatus{ - lastBlockSeen: 101, - lastBlockTime: time1, - previousBlockTime: &period0, - }, - expectedDelay: period0_80percent, - expectedEvent: &aggsendertypes.EventNewBlock{ - BlockNumber: 101, - }, - }, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - - timeNowFunc = func() time.Time { - return tt.forcedTime - } - - if tt.HeaderByNumberError == false { - hdr1 := &types.Header{ - Number: big.NewInt(int64(tt.HeaderByNumberErrorNumber)), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() - } else { - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(nil, fmt.Errorf("error")).Once() - } - delay, newStatus, event := testData.sut.step(context.TODO(), tt.previousStatus) - require.Equal(t, tt.expectedDelay, delay, "delay") - require.Equal(t, tt.expectedStatus, newStatus, "new_status") - if tt.expectedEvent == nil { - require.Nil(t, event, "send_event") - } else { - require.Equal(t, tt.expectedEvent.BlockNumber, 
event.BlockNumber, "send_event") - } - }) - } -} - -func TestDelayNoPreviousBLock(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - status := blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - } - delay := testData.sut.nextBlockRequestDelay(&status, nil) - require.Equal(t, minBlockInterval, delay) -} - -func TestDelayBLock(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - pt := time.Second * 10 - status := blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - previousBlockTime: &pt, - } - delay := testData.sut.nextBlockRequestDelay(&status, nil) - require.Equal(t, minBlockInterval, delay) -} - -func TestNewBlockNotifierPolling(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - require.NotNil(t, testData.sut) - _, err := NewBlockNotifierPolling(testData.ethClientMock, ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.BlockNumberFinality("invalid"), - }, log.WithFields("test", "test"), nil) - require.Error(t, err) -} - -func TestBlockNotifierPollingString(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - require.NotEmpty(t, testData.sut.String()) - testData.sut.lastStatus = &blockNotifierPollingInternalStatus{ - lastBlockSeen: 100, - } - require.NotEmpty(t, testData.sut.String()) -} - -func TestBlockNotifierPollingStart(t *testing.T) { - testData := newBlockNotifierPollingTestData(t, nil) - ch := testData.sut.Subscribe("test") - hdr1 := &types.Header{ - Number: big.NewInt(100), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr1, nil).Once() - hdr2 := &types.Header{ - Number: big.NewInt(101), - } - testData.ethClientMock.EXPECT().HeaderByNumber(mock.Anything, mock.Anything).Return(hdr2, nil).Once() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go testData.sut.Start(ctx) - block := <-ch - require.NotNil(t, block) - require.Equal(t, uint64(101), block.BlockNumber) -} - -type 
blockNotifierPollingTestData struct { - sut *BlockNotifierPolling - ethClientMock *mocks.EthClient - ctx context.Context -} - -func newBlockNotifierPollingTestData(t *testing.T, config *ConfigBlockNotifierPolling) blockNotifierPollingTestData { - t.Helper() - if config == nil { - config = &ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.LatestBlock, - CheckNewBlockInterval: 0, - } - } - ethClientMock := mocks.NewEthClient(t) - logger := log.WithFields("test", "BlockNotifierPolling") - sut, err := NewBlockNotifierPolling(ethClientMock, *config, logger, nil) - require.NoError(t, err) - return blockNotifierPollingTestData{ - sut: sut, - ethClientMock: ethClientMock, - ctx: context.TODO(), - } -} diff --git a/aggsender/config.go b/aggsender/config.go deleted file mode 100644 index a81c12990..000000000 --- a/aggsender/config.go +++ /dev/null @@ -1,58 +0,0 @@ -package aggsender - -import ( - "fmt" - - "github.com/0xPolygon/cdk/config/types" -) - -// Config is the configuration for the AggSender -type Config struct { - // StoragePath is the path of the sqlite db on which the AggSender will store the data - StoragePath string `mapstructure:"StoragePath"` - // AggLayerURL is the URL of the AggLayer - AggLayerURL string `mapstructure:"AggLayerURL"` - // AggsenderPrivateKey is the private key which is used to sign certificates - AggsenderPrivateKey types.KeystoreFileConfig `mapstructure:"AggsenderPrivateKey"` - // URLRPCL2 is the URL of the L2 RPC node - URLRPCL2 string `mapstructure:"URLRPCL2"` - // BlockFinality indicates which finality follows AggLayer - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // EpochNotificationPercentage indicates the percentage of the epoch - // the AggSender should send the certificate - // 0 -> Begin - // 50 -> Middle - EpochNotificationPercentage uint `mapstructure:"EpochNotificationPercentage"` - // 
SaveCertificatesToFilesPath if != "" tells the AggSender to save the certificates to a file in this path - SaveCertificatesToFilesPath string `mapstructure:"SaveCertificatesToFilesPath"` - - // MaxRetriesStoreCertificate is the maximum number of retries to store a certificate - // 0 is infinite - MaxRetriesStoreCertificate int `mapstructure:"MaxRetriesStoreCertificate"` - // DelayBeetweenRetries is the delay between retries: - // is used on store Certificate and also in initial check - DelayBeetweenRetries types.Duration `mapstructure:"DelayBeetweenRetries"` - // KeepCertificatesHistory is a flag to keep the certificates history on storage - KeepCertificatesHistory bool `mapstructure:"KeepCertificatesHistory"` - // MaxCertSize is the maximum size of the certificate (the emitted certificate can be bigger that this size) - // 0 is infinite - MaxCertSize uint `mapstructure:"MaxCertSize"` - // BridgeMetadataAsHash is a flag to import the bridge metadata as hash - BridgeMetadataAsHash bool `mapstructure:"BridgeMetadataAsHash"` - // DryRun is a flag to enable the dry run mode - // in this mode the AggSender will not send the certificates to Agglayer - DryRun bool `mapstructure:"DryRun"` - // EnableRPC is a flag to enable the RPC for aggsender - EnableRPC bool `mapstructure:"EnableRPC"` -} - -// String returns a string representation of the Config -func (c Config) String() string { - return "StoragePath: " + c.StoragePath + "\n" + - "AggLayerURL: " + c.AggLayerURL + "\n" + - "AggsenderPrivateKeyPath: " + c.AggsenderPrivateKey.Path + "\n" + - "URLRPCL2: " + c.URLRPCL2 + "\n" + - "BlockFinality: " + c.BlockFinality + "\n" + - "EpochNotificationPercentage: " + fmt.Sprintf("%d", c.EpochNotificationPercentage) + "\n" + - "SaveCertificatesToFilesPath: " + c.SaveCertificatesToFilesPath + "\n" -} diff --git a/aggsender/db/aggsender_db_storage.go b/aggsender/db/aggsender_db_storage.go deleted file mode 100644 index 3a9a9f12d..000000000 --- a/aggsender/db/aggsender_db_storage.go 
+++ /dev/null @@ -1,260 +0,0 @@ -package db - -import ( - "context" - "database/sql" - "errors" - "fmt" - "strings" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/db/migrations" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" -) - -const errWhileRollbackFormat = "error while rolling back tx: %w" - -// AggSenderStorage is the interface that defines the methods to interact with the storage -type AggSenderStorage interface { - // GetCertificateByHeight returns a certificate by its height - GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) - // GetLastSentCertificate returns the last certificate sent to the aggLayer - GetLastSentCertificate() (*types.CertificateInfo, error) - // SaveLastSentCertificate saves the last certificate sent to the aggLayer - SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error - // DeleteCertificate deletes a certificate from the storage - DeleteCertificate(ctx context.Context, certificateID common.Hash) error - // GetCertificatesByStatus returns a list of certificates by their status - GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) - // UpdateCertificate updates certificate in db - UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error -} - -var _ AggSenderStorage = (*AggSenderSQLStorage)(nil) - -// AggSenderSQLStorageConfig is the configuration for the AggSenderSQLStorage -type AggSenderSQLStorageConfig struct { - DBPath string - KeepCertificatesHistory bool -} - -// AggSenderSQLStorage is the struct that implements the AggSenderStorage interface -type AggSenderSQLStorage struct { - logger *log.Logger - db *sql.DB - cfg AggSenderSQLStorageConfig -} - -// NewAggSenderSQLStorage creates a new AggSenderSQLStorage -func NewAggSenderSQLStorage(logger 
*log.Logger, cfg AggSenderSQLStorageConfig) (*AggSenderSQLStorage, error) { - db, err := db.NewSQLiteDB(cfg.DBPath) - if err != nil { - return nil, err - } - if err := migrations.RunMigrations(logger, db); err != nil { - return nil, err - } - - return &AggSenderSQLStorage{ - db: db, - logger: logger, - cfg: cfg, - }, nil -} - -func (a *AggSenderSQLStorage) GetCertificatesByStatus( - statuses []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - query := "SELECT * FROM certificate_info" - args := make([]interface{}, len(statuses)) - - if len(statuses) > 0 { - placeholders := make([]string, len(statuses)) - // Build the WHERE clause for status filtering - for i := range statuses { - placeholders[i] = fmt.Sprintf("$%d", i+1) - args[i] = statuses[i] - } - - // Build the WHERE clause with the joined placeholders - query += " WHERE status IN (" + strings.Join(placeholders, ", ") + ")" - } - - // Add ordering by creation date (oldest first) - query += " ORDER BY height ASC" - - var certificates []*types.CertificateInfo - if err := meddler.QueryAll(a.db, &certificates, query, args...); err != nil { - return nil, err - } - - return certificates, nil -} - -// GetCertificateByHeight returns a certificate by its height -func (a *AggSenderSQLStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - return getCertificateByHeight(a.db, height) -} - -// getCertificateByHeight returns a certificate by its height using the provided db -func getCertificateByHeight(db db.Querier, - height uint64) (*types.CertificateInfo, error) { - var certificateInfo types.CertificateInfo - if err := meddler.QueryRow(db, &certificateInfo, - "SELECT * FROM certificate_info WHERE height = $1;", height); err != nil { - return nil, getSelectQueryError(height, err) - } - - return &certificateInfo, nil -} - -// GetLastSentCertificate returns the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) GetLastSentCertificate() (*types.CertificateInfo, 
error) { - var certificateInfo types.CertificateInfo - if err := meddler.QueryRow(a.db, &certificateInfo, - "SELECT * FROM certificate_info ORDER BY height DESC LIMIT 1;"); err != nil { - return nil, getSelectQueryError(0, err) - } - - return &certificateInfo, nil -} - -// SaveLastSentCertificate saves the last certificate sent to the aggLayer -func (a *AggSenderSQLStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return fmt.Errorf("saveLastSentCertificate NewTx. Err: %w", err) - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - cert, err := getCertificateByHeight(tx, certificate.Height) - if err != nil && !errors.Is(err, db.ErrNotFound) { - return fmt.Errorf("saveLastSentCertificate getCertificateByHeight. Err: %w", err) - } - - if cert != nil { - // we already have a certificate with this height - // we need to delete it before inserting the new one - if err = a.moveCertificateToHistoryOrDelete(tx, cert); err != nil { - return fmt.Errorf("saveLastSentCertificate moveCertificateToHistory Err: %w", err) - } - } - - if err = meddler.Insert(tx, "certificate_info", &certificate); err != nil { - return fmt.Errorf("error inserting certificate info: %w", err) - } - - if err = tx.Commit(); err != nil { - return fmt.Errorf("saveLastSentCertificate commit. Err: %w", err) - } - shouldRollback = false - - a.logger.Debugf("inserted certificate - Height: %d. 
Hash: %s", certificate.Height, certificate.CertificateID) - - return nil -} - -func (a *AggSenderSQLStorage) moveCertificateToHistoryOrDelete(tx db.Querier, - certificate *types.CertificateInfo) error { - if a.cfg.KeepCertificatesHistory { - a.logger.Debugf("moving certificate to history - new CertificateID: %s", certificate.ID()) - if _, err := tx.Exec(`INSERT INTO certificate_info_history SELECT * FROM certificate_info WHERE height = $1;`, - certificate.Height); err != nil { - return fmt.Errorf("error moving certificate to history: %w", err) - } - } - a.logger.Debugf("deleting certificate - CertificateID: %s", certificate.ID()) - if err := deleteCertificate(tx, certificate.CertificateID); err != nil { - return fmt.Errorf("deleteCertificate %s . Error: %w", certificate.ID(), err) - } - - return nil -} - -// DeleteCertificate deletes a certificate from the storage -func (a *AggSenderSQLStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - tx, err := db.NewTx(ctx, a.db) - if err != nil { - return err - } - defer func() { - if err != nil { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - if err = deleteCertificate(tx, certificateID); err != nil { - return err - } - - if err = tx.Commit(); err != nil { - return err - } - a.logger.Debugf("deleted certificate - CertificateID: %s", certificateID) - return nil -} - -// deleteCertificate deletes a certificate from the storage using the provided db -func deleteCertificate(tx db.Querier, certificateID common.Hash) error { - if _, err := tx.Exec(`DELETE FROM certificate_info WHERE certificate_id = $1;`, certificateID.String()); err != nil { - return fmt.Errorf("error deleting certificate info: %w", err) - } - - return nil -} - -// UpdateCertificate updates a certificate -func (a *AggSenderSQLStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { - tx, err := db.NewTx(ctx, a.db) - if err 
!= nil { - return err - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - a.logger.Errorf(errWhileRollbackFormat, errRllbck) - } - } - }() - - if _, err = tx.Exec(`UPDATE certificate_info SET status = $1, updated_at = $2 WHERE certificate_id = $3;`, - certificate.Status, certificate.UpdatedAt, certificate.CertificateID.String()); err != nil { - return fmt.Errorf("error updating certificate info: %w", err) - } - if err = tx.Commit(); err != nil { - return err - } - shouldRollback = false - - a.logger.Debugf("updated certificate status - CertificateID: %s", certificate.CertificateID) - - return nil -} - -func getSelectQueryError(height uint64, err error) error { - errToReturn := err - if errors.Is(err, sql.ErrNoRows) { - if height == 0 { - // height 0 is never sent to the aggLayer - // so we don't return an error in this case - errToReturn = nil - } else { - errToReturn = db.ErrNotFound - } - } - - return errToReturn -} diff --git a/aggsender/db/aggsender_db_storage_test.go b/aggsender/db/aggsender_db_storage_test.go deleted file mode 100644 index 912d243c7..000000000 --- a/aggsender/db/aggsender_db_storage_test.go +++ /dev/null @@ -1,427 +0,0 @@ -package db - -import ( - "context" - "encoding/json" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func Test_Storage(t *testing.T) { - ctx := context.Background() - - path := path.Join(t.TempDir(), "aggsenderTest_Storage.sqlite") - log.Debugf("sqlite path: %s", path) - cfg := AggSenderSQLStorageConfig{ - DBPath: path, - KeepCertificatesHistory: true, - } - - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - - updateTime := uint32(time.Now().UTC().UnixMilli()) - - 
t.Run("SaveLastSentCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 2, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("DeleteCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 2, - CertificateID: common.HexToHash("0x3"), - NewLocalExitRoot: common.HexToHash("0x4"), - FromBlock: 3, - ToBlock: 4, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - require.NoError(t, storage.DeleteCertificate(ctx, certificate.CertificateID)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("GetLastSentCertificate", func(t *testing.T) { - // try getting a certificate that doesn't exist - certificateFromDB, err := storage.GetLastSentCertificate() - require.NoError(t, err) - require.Nil(t, certificateFromDB) - - // try getting a certificate that exists - certificate := types.CertificateInfo{ - Height: 3, - CertificateID: common.HexToHash("0x5"), - NewLocalExitRoot: common.HexToHash("0x6"), - FromBlock: 5, - ToBlock: 6, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err = storage.GetLastSentCertificate() - require.NoError(t, err) - require.NotNil(t, certificateFromDB) - require.Equal(t, certificate, 
*certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("GetCertificateByHeight", func(t *testing.T) { - // try getting height 0 - certificateFromDB, err := storage.GetCertificateByHeight(0) - require.NoError(t, err) - require.Nil(t, certificateFromDB) - - // try getting a certificate that doesn't exist - certificateFromDB, err = storage.GetCertificateByHeight(4) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - - // try getting a certificate that exists - certificate := types.CertificateInfo{ - Height: 11, - CertificateID: common.HexToHash("0x17"), - NewLocalExitRoot: common.HexToHash("0x18"), - FromBlock: 17, - ToBlock: 18, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err = storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.NotNil(t, certificateFromDB) - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("GetCertificatesByStatus", func(t *testing.T) { - // Insert some certificates with different statuses - certificates := []*types.CertificateInfo{ - { - Height: 7, - CertificateID: common.HexToHash("0x7"), - NewLocalExitRoot: common.HexToHash("0x8"), - FromBlock: 7, - ToBlock: 8, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - { - Height: 9, - CertificateID: common.HexToHash("0x9"), - NewLocalExitRoot: common.HexToHash("0xA"), - FromBlock: 9, - ToBlock: 10, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - { - Height: 11, - CertificateID: common.HexToHash("0xB"), - NewLocalExitRoot: common.HexToHash("0xC"), - FromBlock: 11, - ToBlock: 12, - Status: agglayer.InError, - CreatedAt: updateTime, - UpdatedAt: updateTime, - }, - } - - for _, cert := range certificates { - require.NoError(t, storage.SaveLastSentCertificate(ctx, *cert)) - } 
- - // Test fetching certificates with status Settled - statuses := []agglayer.CertificateStatus{agglayer.Settled} - certificatesFromDB, err := storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[0]}, certificatesFromDB) - - // Test fetching certificates with status Pending - statuses = []agglayer.CertificateStatus{agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[1]}, certificatesFromDB) - - // Test fetching certificates with status InError - statuses = []agglayer.CertificateStatus{agglayer.InError} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 1) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[2]}, certificatesFromDB) - - // Test fetching certificates with status InError and Pending - statuses = []agglayer.CertificateStatus{agglayer.InError, agglayer.Pending} - certificatesFromDB, err = storage.GetCertificatesByStatus(statuses) - require.NoError(t, err) - require.Len(t, certificatesFromDB, 2) - require.ElementsMatch(t, []*types.CertificateInfo{certificates[1], certificates[2]}, certificatesFromDB) - - require.NoError(t, storage.clean()) - }) - - t.Run("UpdateCertificateStatus", func(t *testing.T) { - // Insert a certificate - certificate := types.CertificateInfo{ - Height: 13, - RetryCount: 1234, - CertificateID: common.HexToHash("0xD"), - NewLocalExitRoot: common.HexToHash("0xE"), - FromBlock: 13, - ToBlock: 14, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - // Update the status of the certificate - certificate.Status = agglayer.Settled - certificate.UpdatedAt = 
updateTime + 1 - require.NoError(t, storage.UpdateCertificate(ctx, certificate)) - - // Fetch the certificate and verify the status has been updated - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate.Status, certificateFromDB.Status, "equal status") - require.Equal(t, certificate.UpdatedAt, certificateFromDB.UpdatedAt, "equal updated at") - - require.NoError(t, storage.clean()) - }) -} - -func Test_SaveLastSentCertificate(t *testing.T) { - ctx := context.Background() - - path := path.Join(t.TempDir(), "aggsenderTest_SaveLastSentCertificate.sqlite") - log.Debugf("sqlite path: %s", path) - cfg := AggSenderSQLStorageConfig{ - DBPath: path, - KeepCertificatesHistory: true, - } - - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - - updateTime := uint32(time.Now().UTC().UnixMilli()) - - t.Run("SaveNewCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 2, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("UpdateExistingCertificate", func(t *testing.T) { - certificate := types.CertificateInfo{ - Height: 2, - CertificateID: common.HexToHash("0x3"), - NewLocalExitRoot: common.HexToHash("0x4"), - FromBlock: 3, - ToBlock: 4, - Status: agglayer.InError, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - // Update the certificate with the same height - updatedCertificate := types.CertificateInfo{ - Height: 
2, - CertificateID: common.HexToHash("0x5"), - NewLocalExitRoot: common.HexToHash("0x6"), - FromBlock: 3, - ToBlock: 6, - Status: agglayer.Pending, - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, updatedCertificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(updatedCertificate.Height) - require.NoError(t, err) - require.Equal(t, updatedCertificate, *certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("SaveCertificateWithRollback", func(t *testing.T) { - // Simulate an error during the transaction to trigger a rollback - certificate := types.CertificateInfo{ - Height: 3, - CertificateID: common.HexToHash("0x7"), - NewLocalExitRoot: common.HexToHash("0x8"), - FromBlock: 7, - ToBlock: 8, - Status: agglayer.Settled, - CreatedAt: updateTime, - UpdatedAt: updateTime, - } - - // Close the database to force an error - require.NoError(t, storage.db.Close()) - - err := storage.SaveLastSentCertificate(ctx, certificate) - require.Error(t, err) - - // Reopen the database and check that the certificate was not saved - storage.db, err = db.NewSQLiteDB(path) - require.NoError(t, err) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.ErrorIs(t, err, db.ErrNotFound) - require.Nil(t, certificateFromDB) - require.NoError(t, storage.clean()) - }) - - t.Run("SaveCertificate with raw data", func(t *testing.T) { - certfiicate := &agglayer.SignedCertificate{ - Certificate: &agglayer.Certificate{ - NetworkID: 1, - Height: 1, - PrevLocalExitRoot: common.HexToHash("0x1"), - NewLocalExitRoot: common.HexToHash("0x2"), - Metadata: common.HexToHash("0x3"), - BridgeExits: []*agglayer.BridgeExit{ - { - LeafType: agglayer.LeafTypeAsset, - TokenInfo: &agglayer.TokenInfo{ - OriginNetwork: 1, - OriginTokenAddress: common.HexToAddress("0x1"), - }, - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("0x2"), - Amount: big.NewInt(100), - Metadata: []byte("metadata"), - }, - }, - ImportedBridgeExits: 
[]*agglayer.ImportedBridgeExit{}, - }, - Signature: &agglayer.Signature{ - R: common.HexToHash("0x4"), - S: common.HexToHash("0x5"), - OddParity: false, - }, - } - - raw, err := json.Marshal(certfiicate) - require.NoError(t, err) - - certificate := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x9"), - NewLocalExitRoot: common.HexToHash("0x2"), - FromBlock: 1, - ToBlock: 10, - Status: agglayer.Pending, - CreatedAt: updateTime, - UpdatedAt: updateTime, - SignedCertificate: string(raw), - } - require.NoError(t, storage.SaveLastSentCertificate(ctx, certificate)) - - certificateFromDB, err := storage.GetCertificateByHeight(certificate.Height) - require.NoError(t, err) - require.Equal(t, certificate, *certificateFromDB) - require.Equal(t, raw, []byte(certificateFromDB.SignedCertificate)) - - require.NoError(t, storage.clean()) - }) -} - -func (a *AggSenderSQLStorage) clean() error { - if _, err := a.db.Exec(`DELETE FROM certificate_info;`); err != nil { - return err - } - - return nil -} - -func Test_StoragePreviousLER(t *testing.T) { - ctx := context.TODO() - dbPath := path.Join(t.TempDir(), "Test_StoragePreviousLER.sqlite") - cfg := AggSenderSQLStorageConfig{ - DBPath: dbPath, - KeepCertificatesHistory: true, - } - storage, err := NewAggSenderSQLStorage(log.WithFields("aggsender-db"), cfg) - require.NoError(t, err) - require.NotNil(t, storage) - - certNoLER := types.CertificateInfo{ - Height: 0, - CertificateID: common.HexToHash("0x1"), - Status: agglayer.InError, - NewLocalExitRoot: common.HexToHash("0x2"), - } - err = storage.SaveLastSentCertificate(ctx, certNoLER) - require.NoError(t, err) - - readCertNoLER, err := storage.GetCertificateByHeight(0) - require.NoError(t, err) - require.NotNil(t, readCertNoLER) - require.Equal(t, certNoLER, *readCertNoLER) - - certLER := types.CertificateInfo{ - Height: 1, - CertificateID: common.HexToHash("0x2"), - Status: agglayer.InError, - NewLocalExitRoot: common.HexToHash("0x2"), - PreviousLocalExitRoot: 
&common.Hash{}, - } - err = storage.SaveLastSentCertificate(ctx, certLER) - require.NoError(t, err) - - readCertWithLER, err := storage.GetCertificateByHeight(1) - require.NoError(t, err) - require.NotNil(t, readCertWithLER) - require.Equal(t, certLER, *readCertWithLER) -} diff --git a/aggsender/db/migrations/0001.sql b/aggsender/db/migrations/0001.sql deleted file mode 100644 index d418f1d89..000000000 --- a/aggsender/db/migrations/0001.sql +++ /dev/null @@ -1,35 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS certificate_info; -DROP TABLE IF EXISTS certificate_info_history; -DROP TABLE IF EXISTS certificate_info_history; - --- +migrate Up -CREATE TABLE certificate_info ( - height INTEGER NOT NULL, - retry_count INTEGER DEFAULT 0, - certificate_id VARCHAR NOT NULL, - status INTEGER NOT NULL, - previous_local_exit_root VARCHAR, - new_local_exit_root VARCHAR NOT NULL, - from_block INTEGER NOT NULL, - to_block INTEGER NOT NULL, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - signed_certificate TEXT, - PRIMARY KEY (height) -); - -CREATE TABLE certificate_info_history ( - height INTEGER NOT NULL , - retry_count INTEGER DEFAULT 0, - certificate_id VARCHAR NOT NULL, - status INTEGER NOT NULL, - previous_local_exit_root VARCHAR, - new_local_exit_root VARCHAR NOT NULL, - from_block INTEGER NOT NULL, - to_block INTEGER NOT NULL, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - signed_certificate TEXT, - PRIMARY KEY (height, retry_count) -); diff --git a/aggsender/db/migrations/migrations.go b/aggsender/db/migrations/migrations.go deleted file mode 100644 index 78c58b85e..000000000 --- a/aggsender/db/migrations/migrations.go +++ /dev/null @@ -1,24 +0,0 @@ -package migrations - -import ( - "database/sql" - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" - "github.com/0xPolygon/cdk/log" -) - -//go:embed 0001.sql -var mig001 string - -func RunMigrations(logger *log.Logger, database *sql.DB) error { - migrations := 
[]types.Migration{ - { - ID: "0001", - SQL: mig001, - }, - } - - return db.RunMigrationsDB(logger, database, migrations) -} diff --git a/aggsender/epoch_notifier_per_block.go b/aggsender/epoch_notifier_per_block.go deleted file mode 100644 index 80494cc0d..000000000 --- a/aggsender/epoch_notifier_per_block.go +++ /dev/null @@ -1,217 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/types" -) - -const ( - maxPercent = 100.0 -) - -type ExtraInfoEventEpoch struct { - PendingBlocks int -} - -func (e *ExtraInfoEventEpoch) String() string { - return fmt.Sprintf("ExtraInfoEventEpoch: pendingBlocks=%d", e.PendingBlocks) -} - -type ConfigEpochNotifierPerBlock struct { - StartingEpochBlock uint64 - NumBlockPerEpoch uint - - // EpochNotificationPercentage - // 0 -> begin new Epoch - // 50 -> middle of epoch - // 100 -> end of epoch (same as 0) - EpochNotificationPercentage uint -} - -func (c *ConfigEpochNotifierPerBlock) String() string { - if c == nil { - return "nil" - } - return fmt.Sprintf("{startEpochBlock=%d, sizeEpoch=%d, threshold=%d%%}", - c.StartingEpochBlock, c.NumBlockPerEpoch, c.EpochNotificationPercentage) -} - -func NewConfigEpochNotifierPerBlock(aggLayer agglayer.AggLayerClientGetEpochConfiguration, - epochNotificationPercentage uint) (*ConfigEpochNotifierPerBlock, error) { - if aggLayer == nil { - return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: aggLayerClient is required") - } - clockConfig, err := aggLayer.GetEpochConfiguration() - if err != nil { - return nil, fmt.Errorf("newConfigEpochNotifierPerBlock: error getting clock configuration from AggLayer: %w", err) - } - return &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: clockConfig.GenesisBlock, - NumBlockPerEpoch: uint(clockConfig.EpochDuration), - EpochNotificationPercentage: epochNotificationPercentage, - }, nil -} - -func (c *ConfigEpochNotifierPerBlock) Validate() error { - if c.NumBlockPerEpoch == 0 { - 
return fmt.Errorf("numBlockPerEpoch: num block per epoch is required > 0 ") - } - if c.EpochNotificationPercentage >= maxPercent { - return fmt.Errorf("epochNotificationPercentage: must be between 0 and 99") - } - return nil -} - -type EpochNotifierPerBlock struct { - blockNotifier types.BlockNotifier - logger types.Logger - - lastStartingEpochBlock uint64 - - Config ConfigEpochNotifierPerBlock - types.GenericSubscriber[types.EpochEvent] -} - -func NewEpochNotifierPerBlock(blockNotifier types.BlockNotifier, - logger types.Logger, - config ConfigEpochNotifierPerBlock, - subscriber types.GenericSubscriber[types.EpochEvent]) (*EpochNotifierPerBlock, error) { - if subscriber == nil { - subscriber = NewGenericSubscriberImpl[types.EpochEvent]() - } - - err := config.Validate() - if err != nil { - return nil, fmt.Errorf("invalid config: %w", err) - } - return &EpochNotifierPerBlock{ - blockNotifier: blockNotifier, - logger: logger, - lastStartingEpochBlock: config.StartingEpochBlock, - Config: config, - GenericSubscriber: subscriber, - }, nil -} - -func (e *EpochNotifierPerBlock) String() string { - return fmt.Sprintf("EpochNotifierPerBlock: config: %s", e.Config.String()) -} - -// StartAsync starts the notifier in a goroutine -func (e *EpochNotifierPerBlock) StartAsync(ctx context.Context) { - eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") - go e.startInternal(ctx, eventNewBlockChannel) -} - -// Start starts the notifier synchronously -func (e *EpochNotifierPerBlock) Start(ctx context.Context) { - eventNewBlockChannel := e.blockNotifier.Subscribe("EpochNotifierPerBlock") - e.startInternal(ctx, eventNewBlockChannel) -} - -func (e *EpochNotifierPerBlock) startInternal(ctx context.Context, eventNewBlockChannel <-chan types.EventNewBlock) { - status := internalStatus{ - lastBlockSeen: e.Config.StartingEpochBlock, - waitingForEpoch: e.epochNumber(e.Config.StartingEpochBlock), - } - for { - select { - case <-ctx.Done(): - return - case newBlock := 
<-eventNewBlockChannel: - var event *types.EpochEvent - status, event = e.step(status, newBlock) - if event != nil { - e.logger.Debugf("new Epoch Event: %s", event.String()) - e.GenericSubscriber.Publish(*event) - } - } - } -} - -type internalStatus struct { - lastBlockSeen uint64 - waitingForEpoch uint64 -} - -func (e *EpochNotifierPerBlock) step(status internalStatus, - newBlock types.EventNewBlock) (internalStatus, *types.EpochEvent) { - currentBlock := newBlock.BlockNumber - if currentBlock < e.Config.StartingEpochBlock { - // This is a bit strange, the first epoch is in the future - e.logger.Warnf("Block number %d is before the starting first epoch block %d."+ - " Please check your config", currentBlock, e.Config.StartingEpochBlock) - return status, nil - } - // No new block - if currentBlock <= status.lastBlockSeen { - return status, nil - } - status.lastBlockSeen = currentBlock - - needNotify, closingEpoch := e.isNotificationRequired(currentBlock, status.waitingForEpoch) - percentEpoch := e.percentEpoch(currentBlock) - logFunc := e.logger.Debugf - if needNotify { - logFunc = e.logger.Infof - } - logFunc("New block seen [finality:%s]: %d. 
blockRate:%s Epoch:%d Percent:%f%% notify:%v config:%s", - newBlock.BlockFinalityType, newBlock.BlockNumber, newBlock.BlockRate, closingEpoch, - percentEpoch*maxPercent, needNotify, e.Config.String()) - if needNotify { - // Notify the epoch has started - info := e.infoEpoch(currentBlock, closingEpoch) - status.waitingForEpoch = closingEpoch + 1 - return status, &types.EpochEvent{ - Epoch: closingEpoch, - ExtraInfo: info, - } - } - return status, nil -} - -func (e *EpochNotifierPerBlock) infoEpoch(currentBlock, newEpochNotified uint64) *ExtraInfoEventEpoch { - nextBlockStartingEpoch := e.endBlockEpoch(newEpochNotified) - return &ExtraInfoEventEpoch{ - PendingBlocks: int(nextBlockStartingEpoch - currentBlock), - } -} -func (e *EpochNotifierPerBlock) percentEpoch(currentBlock uint64) float64 { - epoch := e.epochNumber(currentBlock) - startingBlock := e.startingBlockEpoch(epoch) - elapsedBlocks := currentBlock - startingBlock - return float64(elapsedBlocks) / float64(e.Config.NumBlockPerEpoch) -} -func (e *EpochNotifierPerBlock) isNotificationRequired(currentBlock, lastEpochNotified uint64) (bool, uint64) { - percentEpoch := e.percentEpoch(currentBlock) - thresholdPercent := float64(e.Config.EpochNotificationPercentage) / maxPercent - maxTresholdPercent := float64(e.Config.NumBlockPerEpoch-1) / float64(e.Config.NumBlockPerEpoch) - if thresholdPercent > maxTresholdPercent { - thresholdPercent = maxTresholdPercent - } - if percentEpoch < thresholdPercent { - return false, e.epochNumber(currentBlock) - } - nextEpoch := e.epochNumber(currentBlock) + 1 - return nextEpoch > lastEpochNotified, e.epochNumber(currentBlock) -} - -func (e *EpochNotifierPerBlock) startingBlockEpoch(epoch uint64) uint64 { - if epoch == 0 { - return e.Config.StartingEpochBlock - 1 - } - return e.Config.StartingEpochBlock + ((epoch - 1) * uint64(e.Config.NumBlockPerEpoch)) -} - -func (e *EpochNotifierPerBlock) endBlockEpoch(epoch uint64) uint64 { - return e.startingBlockEpoch(epoch + 1) -} -func (e 
*EpochNotifierPerBlock) epochNumber(currentBlock uint64) uint64 { - if currentBlock < e.Config.StartingEpochBlock { - return 0 - } - return 1 + ((currentBlock - e.Config.StartingEpochBlock) / uint64(e.Config.NumBlockPerEpoch)) -} diff --git a/aggsender/epoch_notifier_per_block_test.go b/aggsender/epoch_notifier_per_block_test.go deleted file mode 100644 index ac35350e5..000000000 --- a/aggsender/epoch_notifier_per_block_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package aggsender - -import ( - "context" - "fmt" - "testing" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/mocks" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestConfigEpochNotifierPerBlockString(t *testing.T) { - cfg := ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 123, - NumBlockPerEpoch: 456, - EpochNotificationPercentage: 789, - } - require.Equal(t, "{startEpochBlock=123, sizeEpoch=456, threshold=789%}", cfg.String()) - var cfg2 *ConfigEpochNotifierPerBlock - require.Equal(t, "nil", cfg2.String()) -} - -func TestStartingBlockEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 9, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 80, - }) - // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- - // BLOCK: 9 19 29 39 49 - require.Equal(t, uint64(8), testData.sut.startingBlockEpoch(0)) - require.Equal(t, uint64(9), testData.sut.startingBlockEpoch(1)) - require.Equal(t, uint64(19), testData.sut.startingBlockEpoch(2)) -} - -func TestEpochNotifyPercentageEdgeCase0(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - testData.sut.Config.EpochNotificationPercentage = 0 - notify, epoch := testData.sut.isNotificationRequired(9, 0) - require.True(t, notify) - require.Equal(t, uint64(1), epoch) -} - -// if 
percent is 99 means at end of epoch, so in a config 0, epoch-size=10, -// 99% means last block of epoch -func TestEpochNotifyPercentageEdgeCase99(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - testData.sut.Config.EpochNotificationPercentage = 99 - notify, epoch := testData.sut.isNotificationRequired(9, 0) - require.True(t, notify) - require.Equal(t, uint64(1), epoch) -} - -func TestEpochStep(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 9, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 50, - }) - // EPOCH: ---0 ----+----1 -----+----2 ----+----3 ----+----4 ----+----5 ----+---- - // BLOCK: 9 19 29 39 49 - // start EPOCH#1 -> 9 - // end EPOCH#1 -> 19 - // start EPOCH#2 -> 19 - - tests := []struct { - name string - initialStatus internalStatus - blockNumber uint64 - expectedEvent bool - expectedEventEpoch uint64 - expectedEventPendingBlocks int - }{ - { - name: "First block of epoch, no notification until close to end", - initialStatus: internalStatus{lastBlockSeen: 8, waitingForEpoch: 0}, - blockNumber: 9, - expectedEvent: false, - expectedEventEpoch: 1, - expectedEventPendingBlocks: 0, - }, - { - name: "epoch#1 close to end, notify it!", - initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 0}, - blockNumber: 18, - expectedEvent: true, - expectedEventEpoch: 1, // Finishing epoch 0 - expectedEventPendingBlocks: 1, // 19 - 18 - }, - { - name: "epoch#1 close to end, but already notified", - initialStatus: internalStatus{lastBlockSeen: 17, waitingForEpoch: 2}, - blockNumber: 18, - expectedEvent: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, event := testData.sut.step(tt.initialStatus, types.EventNewBlock{BlockNumber: tt.blockNumber, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, tt.expectedEvent, event != nil) - if event != nil { - require.Equal(t, tt.expectedEventEpoch, event.Epoch, "Epoch") - extraInfo, ok 
:= event.ExtraInfo.(*ExtraInfoEventEpoch) - require.True(t, ok, "ExtraInfo") - require.Equal(t, tt.expectedEventPendingBlocks, extraInfo.PendingBlocks, "PendingBlocks") - } - }) - } -} - -func TestNewConfigEpochNotifierPerBlock(t *testing.T) { - _, err := NewConfigEpochNotifierPerBlock(nil, 1) - require.Error(t, err) - aggLayerMock := agglayer.NewAgglayerClientMock(t) - aggLayerMock.On("GetEpochConfiguration").Return(nil, fmt.Errorf("error")).Once() - _, err = NewConfigEpochNotifierPerBlock(aggLayerMock, 1) - require.Error(t, err) - cfgAggLayer := &agglayer.ClockConfiguration{ - GenesisBlock: 123, - EpochDuration: 456, - } - aggLayerMock.On("GetEpochConfiguration").Return(cfgAggLayer, nil).Once() - cfg, err := NewConfigEpochNotifierPerBlock(aggLayerMock, 1) - require.NoError(t, err) - require.Equal(t, uint64(123), cfg.StartingEpochBlock) - require.Equal(t, uint(456), cfg.NumBlockPerEpoch) -} - -func TestNotifyEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - ch := testData.sut.Subscribe("test") - chBlocks := make(chan types.EventNewBlock) - testData.blockNotifierMock.EXPECT().Subscribe(mock.Anything).Return(chBlocks) - testData.sut.StartAsync(testData.ctx) - chBlocks <- types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock} - epochEvent := <-ch - require.Equal(t, uint64(11), epochEvent.Epoch) - testData.ctx.Done() -} - -func TestStepSameEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - status := internalStatus{ - lastBlockSeen: 100, - waitingForEpoch: testData.sut.epochNumber(100), - } - newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 103, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(103), newStatus.lastBlockSeen) - require.Equal(t, status.waitingForEpoch, newStatus.waitingForEpoch) -} - -func TestStepNotifyEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, nil) - status := internalStatus{ - lastBlockSeen: 100, - waitingForEpoch: 
testData.sut.epochNumber(100), - } - status, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 109, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(109), status.lastBlockSeen) - require.Equal(t, uint64(12), status.waitingForEpoch) -} - -func TestBlockEpochNumber(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 105, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 1, - }) - require.Equal(t, uint64(0), testData.sut.epochNumber(0)) - require.Equal(t, uint64(0), testData.sut.epochNumber(104)) - require.Equal(t, uint64(1), testData.sut.epochNumber(105)) - require.Equal(t, uint64(1), testData.sut.epochNumber(114)) - require.Equal(t, uint64(2), testData.sut.epochNumber(115)) - require.Equal(t, uint64(2), testData.sut.epochNumber(116)) - require.Equal(t, uint64(2), testData.sut.epochNumber(124)) - require.Equal(t, uint64(3), testData.sut.epochNumber(125)) -} - -func TestBlockBeforeEpoch(t *testing.T) { - testData := newNotifierPerBlockTestData(t, &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 105, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 1, - }) - status := internalStatus{ - lastBlockSeen: 104, - waitingForEpoch: testData.sut.epochNumber(104), - } - newStatus, _ := testData.sut.step(status, types.EventNewBlock{BlockNumber: 104, BlockFinalityType: etherman.LatestBlock}) - // We are previous block of first epoch, so we should do nothing - require.Equal(t, status, newStatus) - status = newStatus - // First block of first epoch - newStatus, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 105, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(105), newStatus.lastBlockSeen) - // Near end first epoch - newStatus, _ = testData.sut.step(status, types.EventNewBlock{BlockNumber: 114, BlockFinalityType: etherman.LatestBlock}) - require.Equal(t, uint64(114), newStatus.lastBlockSeen) -} - -type notifierPerBlockTestData struct { - sut 
*EpochNotifierPerBlock - blockNotifierMock *mocks.BlockNotifier - ctx context.Context -} - -func newNotifierPerBlockTestData(t *testing.T, config *ConfigEpochNotifierPerBlock) notifierPerBlockTestData { - t.Helper() - if config == nil { - config = &ConfigEpochNotifierPerBlock{ - StartingEpochBlock: 0, - NumBlockPerEpoch: 10, - EpochNotificationPercentage: 50, - } - } - blockNotifierMock := mocks.NewBlockNotifier(t) - logger := log.WithFields("test", "EpochNotifierPerBlock") - sut, err := NewEpochNotifierPerBlock(blockNotifierMock, logger, *config, nil) - require.NoError(t, err) - return notifierPerBlockTestData{ - sut: sut, - blockNotifierMock: blockNotifierMock, - ctx: context.TODO(), - } -} diff --git a/aggsender/generic_subscriber_impl.go b/aggsender/generic_subscriber_impl.go deleted file mode 100644 index e4251449d..000000000 --- a/aggsender/generic_subscriber_impl.go +++ /dev/null @@ -1,33 +0,0 @@ -package aggsender - -import "sync" - -type GenericSubscriberImpl[T any] struct { - // map of subscribers with names - subs map[chan T]string - mu sync.RWMutex -} - -func NewGenericSubscriberImpl[T any]() *GenericSubscriberImpl[T] { - return &GenericSubscriberImpl[T]{ - subs: make(map[chan T]string), - } -} - -func (g *GenericSubscriberImpl[T]) Subscribe(subscriberName string) <-chan T { - ch := make(chan T) - g.mu.Lock() - defer g.mu.Unlock() - g.subs[ch] = subscriberName - return ch -} - -func (g *GenericSubscriberImpl[T]) Publish(data T) { - g.mu.RLock() - defer g.mu.RUnlock() - for ch := range g.subs { - go func(ch chan T) { - ch <- data - }(ch) - } -} diff --git a/aggsender/mocks/agg_sender_storage.go b/aggsender/mocks/agg_sender_storage.go deleted file mode 100644 index 9c0d20a64..000000000 --- a/aggsender/mocks/agg_sender_storage.go +++ /dev/null @@ -1,355 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - agglayer "github.com/0xPolygon/cdk/agglayer" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/aggsender/types" -) - -// AggSenderStorage is an autogenerated mock type for the AggSenderStorage type -type AggSenderStorage struct { - mock.Mock -} - -type AggSenderStorage_Expecter struct { - mock *mock.Mock -} - -func (_m *AggSenderStorage) EXPECT() *AggSenderStorage_Expecter { - return &AggSenderStorage_Expecter{mock: &_m.Mock} -} - -// DeleteCertificate provides a mock function with given fields: ctx, certificateID -func (_m *AggSenderStorage) DeleteCertificate(ctx context.Context, certificateID common.Hash) error { - ret := _m.Called(ctx, certificateID) - - if len(ret) == 0 { - panic("no return value specified for DeleteCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { - r0 = rf(ctx, certificateID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_DeleteCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteCertificate' -type AggSenderStorage_DeleteCertificate_Call struct { - *mock.Call -} - -// DeleteCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificateID common.Hash -func (_e *AggSenderStorage_Expecter) DeleteCertificate(ctx interface{}, certificateID interface{}) *AggSenderStorage_DeleteCertificate_Call { - return &AggSenderStorage_DeleteCertificate_Call{Call: _e.mock.On("DeleteCertificate", ctx, certificateID)} -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) Run(run func(ctx context.Context, certificateID common.Hash)) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) Return(_a0 
error) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_DeleteCertificate_Call) RunAndReturn(run func(context.Context, common.Hash) error) *AggSenderStorage_DeleteCertificate_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificateByHeight provides a mock function with given fields: height -func (_m *AggSenderStorage) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - ret := _m.Called(height) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateByHeight") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*types.CertificateInfo, error)); ok { - return rf(height) - } - if rf, ok := ret.Get(0).(func(uint64) *types.CertificateInfo); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' -type AggSenderStorage_GetCertificateByHeight_Call struct { - *mock.Call -} - -// GetCertificateByHeight is a helper method to define mock.On call -// - height uint64 -func (_e *AggSenderStorage_Expecter) GetCertificateByHeight(height interface{}) *AggSenderStorage_GetCertificateByHeight_Call { - return &AggSenderStorage_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Return(_a0, 
_a1) - return _c -} - -func (_c *AggSenderStorage_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (*types.CertificateInfo, error)) *AggSenderStorage_GetCertificateByHeight_Call { - _c.Call.Return(run) - return _c -} - -// GetCertificatesByStatus provides a mock function with given fields: status -func (_m *AggSenderStorage) GetCertificatesByStatus(status []agglayer.CertificateStatus) ([]*types.CertificateInfo, error) { - ret := _m.Called(status) - - if len(ret) == 0 { - panic("no return value specified for GetCertificatesByStatus") - } - - var r0 []*types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)); ok { - return rf(status) - } - if rf, ok := ret.Get(0).(func([]agglayer.CertificateStatus) []*types.CertificateInfo); ok { - r0 = rf(status) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func([]agglayer.CertificateStatus) error); ok { - r1 = rf(status) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetCertificatesByStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificatesByStatus' -type AggSenderStorage_GetCertificatesByStatus_Call struct { - *mock.Call -} - -// GetCertificatesByStatus is a helper method to define mock.On call -// - status []agglayer.CertificateStatus -func (_e *AggSenderStorage_Expecter) GetCertificatesByStatus(status interface{}) *AggSenderStorage_GetCertificatesByStatus_Call { - return &AggSenderStorage_GetCertificatesByStatus_Call{Call: _e.mock.On("GetCertificatesByStatus", status)} -} - -func (_c *AggSenderStorage_GetCertificatesByStatus_Call) Run(run func(status []agglayer.CertificateStatus)) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].([]agglayer.CertificateStatus)) - }) - return _c -} - -func (_c 
*AggSenderStorage_GetCertificatesByStatus_Call) Return(_a0 []*types.CertificateInfo, _a1 error) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggSenderStorage_GetCertificatesByStatus_Call) RunAndReturn(run func([]agglayer.CertificateStatus) ([]*types.CertificateInfo, error)) *AggSenderStorage_GetCertificatesByStatus_Call { - _c.Call.Return(run) - return _c -} - -// GetLastSentCertificate provides a mock function with no fields -func (_m *AggSenderStorage) GetLastSentCertificate() (*types.CertificateInfo, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastSentCertificate") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func() (*types.CertificateInfo, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *types.CertificateInfo); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggSenderStorage_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' -type AggSenderStorage_GetLastSentCertificate_Call struct { - *mock.Call -} - -// GetLastSentCertificate is a helper method to define mock.On call -func (_e *AggSenderStorage_Expecter) GetLastSentCertificate() *AggSenderStorage_GetLastSentCertificate_Call { - return &AggSenderStorage_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) Run(run func()) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Return(_a0, 
_a1) - return _c -} - -func (_c *AggSenderStorage_GetLastSentCertificate_Call) RunAndReturn(run func() (*types.CertificateInfo, error)) *AggSenderStorage_GetLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// SaveLastSentCertificate provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorage) SaveLastSentCertificate(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for SaveLastSentCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_SaveLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SaveLastSentCertificate' -type AggSenderStorage_SaveLastSentCertificate_Call struct { - *mock.Call -} - -// SaveLastSentCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorage_Expecter) SaveLastSentCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_SaveLastSentCertificate_Call { - return &AggSenderStorage_SaveLastSentCertificate_Call{Call: _e.mock.On("SaveLastSentCertificate", ctx, certificate)} -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) Return(_a0 error) *AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_SaveLastSentCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) 
*AggSenderStorage_SaveLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// UpdateCertificate provides a mock function with given fields: ctx, certificate -func (_m *AggSenderStorage) UpdateCertificate(ctx context.Context, certificate types.CertificateInfo) error { - ret := _m.Called(ctx, certificate) - - if len(ret) == 0 { - panic("no return value specified for UpdateCertificate") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, types.CertificateInfo) error); ok { - r0 = rf(ctx, certificate) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AggSenderStorage_UpdateCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCertificate' -type AggSenderStorage_UpdateCertificate_Call struct { - *mock.Call -} - -// UpdateCertificate is a helper method to define mock.On call -// - ctx context.Context -// - certificate types.CertificateInfo -func (_e *AggSenderStorage_Expecter) UpdateCertificate(ctx interface{}, certificate interface{}) *AggSenderStorage_UpdateCertificate_Call { - return &AggSenderStorage_UpdateCertificate_Call{Call: _e.mock.On("UpdateCertificate", ctx, certificate)} -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) Run(run func(ctx context.Context, certificate types.CertificateInfo)) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(types.CertificateInfo)) - }) - return _c -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) Return(_a0 error) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggSenderStorage_UpdateCertificate_Call) RunAndReturn(run func(context.Context, types.CertificateInfo) error) *AggSenderStorage_UpdateCertificate_Call { - _c.Call.Return(run) - return _c -} - -// NewAggSenderStorage creates a new instance of AggSenderStorage. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggSenderStorage(t interface { - mock.TestingT - Cleanup(func()) -}) *AggSenderStorage { - mock := &AggSenderStorage{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/aggsender_interface.go b/aggsender/mocks/aggsender_interface.go deleted file mode 100644 index bfd7e8853..000000000 --- a/aggsender/mocks/aggsender_interface.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// AggsenderInterface is an autogenerated mock type for the aggsenderInterface type -type AggsenderInterface struct { - mock.Mock -} - -type AggsenderInterface_Expecter struct { - mock *mock.Mock -} - -func (_m *AggsenderInterface) EXPECT() *AggsenderInterface_Expecter { - return &AggsenderInterface_Expecter{mock: &_m.Mock} -} - -// Info provides a mock function with given fields: -func (_m *AggsenderInterface) Info() types.AggsenderInfo { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Info") - } - - var r0 types.AggsenderInfo - if rf, ok := ret.Get(0).(func() types.AggsenderInfo); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.AggsenderInfo) - } - - return r0 -} - -// AggsenderInterface_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' -type AggsenderInterface_Info_Call struct { - *mock.Call -} - -// Info is a helper method to define mock.On call -func (_e *AggsenderInterface_Expecter) Info() *AggsenderInterface_Info_Call { - return &AggsenderInterface_Info_Call{Call: _e.mock.On("Info")} -} - -func (_c *AggsenderInterface_Info_Call) Run(run func()) *AggsenderInterface_Info_Call { - _c.Call.Run(func(args 
mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderInterface_Info_Call) Return(_a0 types.AggsenderInfo) *AggsenderInterface_Info_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *AggsenderInterface_Info_Call) RunAndReturn(run func() types.AggsenderInfo) *AggsenderInterface_Info_Call { - _c.Call.Return(run) - return _c -} - -// NewAggsenderInterface creates a new instance of AggsenderInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggsenderInterface(t interface { - mock.TestingT - Cleanup(func()) -}) *AggsenderInterface { - mock := &AggsenderInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/aggsender_storer.go b/aggsender/mocks/aggsender_storer.go deleted file mode 100644 index ed17ea18c..000000000 --- a/aggsender/mocks/aggsender_storer.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// AggsenderStorer is an autogenerated mock type for the aggsenderStorer type -type AggsenderStorer struct { - mock.Mock -} - -type AggsenderStorer_Expecter struct { - mock *mock.Mock -} - -func (_m *AggsenderStorer) EXPECT() *AggsenderStorer_Expecter { - return &AggsenderStorer_Expecter{mock: &_m.Mock} -} - -// GetCertificateByHeight provides a mock function with given fields: height -func (_m *AggsenderStorer) GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) { - ret := _m.Called(height) - - if len(ret) == 0 { - panic("no return value specified for GetCertificateByHeight") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*types.CertificateInfo, error)); ok { - return rf(height) - } - if rf, ok := ret.Get(0).(func(uint64) *types.CertificateInfo); ok { - r0 = rf(height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderStorer_GetCertificateByHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCertificateByHeight' -type AggsenderStorer_GetCertificateByHeight_Call struct { - *mock.Call -} - -// GetCertificateByHeight is a helper method to define mock.On call -// - height uint64 -func (_e *AggsenderStorer_Expecter) GetCertificateByHeight(height interface{}) *AggsenderStorer_GetCertificateByHeight_Call { - return &AggsenderStorer_GetCertificateByHeight_Call{Call: _e.mock.On("GetCertificateByHeight", height)} -} - -func (_c *AggsenderStorer_GetCertificateByHeight_Call) Run(run func(height uint64)) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c 
*AggsenderStorer_GetCertificateByHeight_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *AggsenderStorer_GetCertificateByHeight_Call) RunAndReturn(run func(uint64) (*types.CertificateInfo, error)) *AggsenderStorer_GetCertificateByHeight_Call { - _c.Call.Return(run) - return _c -} - -// GetLastSentCertificate provides a mock function with given fields: -func (_m *AggsenderStorer) GetLastSentCertificate() (*types.CertificateInfo, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastSentCertificate") - } - - var r0 *types.CertificateInfo - var r1 error - if rf, ok := ret.Get(0).(func() (*types.CertificateInfo, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *types.CertificateInfo); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.CertificateInfo) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// AggsenderStorer_GetLastSentCertificate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastSentCertificate' -type AggsenderStorer_GetLastSentCertificate_Call struct { - *mock.Call -} - -// GetLastSentCertificate is a helper method to define mock.On call -func (_e *AggsenderStorer_Expecter) GetLastSentCertificate() *AggsenderStorer_GetLastSentCertificate_Call { - return &AggsenderStorer_GetLastSentCertificate_Call{Call: _e.mock.On("GetLastSentCertificate")} -} - -func (_c *AggsenderStorer_GetLastSentCertificate_Call) Run(run func()) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *AggsenderStorer_GetLastSentCertificate_Call) Return(_a0 *types.CertificateInfo, _a1 error) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c 
*AggsenderStorer_GetLastSentCertificate_Call) RunAndReturn(run func() (*types.CertificateInfo, error)) *AggsenderStorer_GetLastSentCertificate_Call { - _c.Call.Return(run) - return _c -} - -// NewAggsenderStorer creates a new instance of AggsenderStorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewAggsenderStorer(t interface { - mock.TestingT - Cleanup(func()) -}) *AggsenderStorer { - mock := &AggsenderStorer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/block_notifier.go b/aggsender/mocks/block_notifier.go deleted file mode 100644 index 24d751b36..000000000 --- a/aggsender/mocks/block_notifier.go +++ /dev/null @@ -1,128 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// BlockNotifier is an autogenerated mock type for the BlockNotifier type -type BlockNotifier struct { - mock.Mock -} - -type BlockNotifier_Expecter struct { - mock *mock.Mock -} - -func (_m *BlockNotifier) EXPECT() *BlockNotifier_Expecter { - return &BlockNotifier_Expecter{mock: &_m.Mock} -} - -// String provides a mock function with no fields -func (_m *BlockNotifier) String() string { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for String") - } - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// BlockNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type BlockNotifier_String_Call struct { - *mock.Call -} - -// String is a helper method to define mock.On call -func (_e *BlockNotifier_Expecter) String() *BlockNotifier_String_Call { - return &BlockNotifier_String_Call{Call: 
_e.mock.On("String")} -} - -func (_c *BlockNotifier_String_Call) Run(run func()) *BlockNotifier_String_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *BlockNotifier_String_Call) Return(_a0 string) *BlockNotifier_String_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *BlockNotifier_String_Call) RunAndReturn(run func() string) *BlockNotifier_String_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *BlockNotifier) Subscribe(id string) <-chan types.EventNewBlock { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan types.EventNewBlock - if rf, ok := ret.Get(0).(func(string) <-chan types.EventNewBlock); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan types.EventNewBlock) - } - } - - return r0 -} - -// BlockNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type BlockNotifier_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *BlockNotifier_Expecter) Subscribe(id interface{}) *BlockNotifier_Subscribe_Call { - return &BlockNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *BlockNotifier_Subscribe_Call) Run(run func(id string)) *BlockNotifier_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *BlockNotifier_Subscribe_Call) Return(_a0 <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *BlockNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EventNewBlock) *BlockNotifier_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewBlockNotifier creates a new instance of BlockNotifier. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewBlockNotifier(t interface { - mock.TestingT - Cleanup(func()) -}) *BlockNotifier { - mock := &BlockNotifier{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/epoch_notifier.go b/aggsender/mocks/epoch_notifier.go deleted file mode 100644 index 0da06d93d..000000000 --- a/aggsender/mocks/epoch_notifier.go +++ /dev/null @@ -1,163 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - context "context" - - types "github.com/0xPolygon/cdk/aggsender/types" - mock "github.com/stretchr/testify/mock" -) - -// EpochNotifier is an autogenerated mock type for the EpochNotifier type -type EpochNotifier struct { - mock.Mock -} - -type EpochNotifier_Expecter struct { - mock *mock.Mock -} - -func (_m *EpochNotifier) EXPECT() *EpochNotifier_Expecter { - return &EpochNotifier_Expecter{mock: &_m.Mock} -} - -// Start provides a mock function with given fields: ctx -func (_m *EpochNotifier) Start(ctx context.Context) { - _m.Called(ctx) -} - -// EpochNotifier_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' -type EpochNotifier_Start_Call struct { - *mock.Call -} - -// Start is a helper method to define mock.On call -// - ctx context.Context -func (_e *EpochNotifier_Expecter) Start(ctx interface{}) *EpochNotifier_Start_Call { - return &EpochNotifier_Start_Call{Call: _e.mock.On("Start", ctx)} -} - -func (_c *EpochNotifier_Start_Call) Run(run func(ctx context.Context)) *EpochNotifier_Start_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EpochNotifier_Start_Call) Return() *EpochNotifier_Start_Call { - _c.Call.Return() - return _c -} - -func (_c *EpochNotifier_Start_Call) RunAndReturn(run 
func(context.Context)) *EpochNotifier_Start_Call { - _c.Run(run) - return _c -} - -// String provides a mock function with no fields -func (_m *EpochNotifier) String() string { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for String") - } - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// EpochNotifier_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String' -type EpochNotifier_String_Call struct { - *mock.Call -} - -// String is a helper method to define mock.On call -func (_e *EpochNotifier_Expecter) String() *EpochNotifier_String_Call { - return &EpochNotifier_String_Call{Call: _e.mock.On("String")} -} - -func (_c *EpochNotifier_String_Call) Run(run func()) *EpochNotifier_String_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *EpochNotifier_String_Call) Return(_a0 string) *EpochNotifier_String_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EpochNotifier_String_Call) RunAndReturn(run func() string) *EpochNotifier_String_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *EpochNotifier) Subscribe(id string) <-chan types.EpochEvent { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan types.EpochEvent - if rf, ok := ret.Get(0).(func(string) <-chan types.EpochEvent); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan types.EpochEvent) - } - } - - return r0 -} - -// EpochNotifier_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type EpochNotifier_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *EpochNotifier_Expecter) Subscribe(id interface{}) 
*EpochNotifier_Subscribe_Call { - return &EpochNotifier_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *EpochNotifier_Subscribe_Call) Run(run func(id string)) *EpochNotifier_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *EpochNotifier_Subscribe_Call) Return(_a0 <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EpochNotifier_Subscribe_Call) RunAndReturn(run func(string) <-chan types.EpochEvent) *EpochNotifier_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewEpochNotifier creates a new instance of EpochNotifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEpochNotifier(t interface { - mock.TestingT - Cleanup(func()) -}) *EpochNotifier { - mock := &EpochNotifier{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/eth_client.go b/aggsender/mocks/eth_client.go deleted file mode 100644 index 6a68de414..000000000 --- a/aggsender/mocks/eth_client.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - big "math/big" - - coretypes "github.com/ethereum/go-ethereum/core/types" - - mock "github.com/stretchr/testify/mock" -) - -// EthClient is an autogenerated mock type for the EthClient type -type EthClient struct { - mock.Mock -} - -type EthClient_Expecter struct { - mock *mock.Mock -} - -func (_m *EthClient) EXPECT() *EthClient_Expecter { - return &EthClient_Expecter{mock: &_m.Mock} -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *EthClient) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClient_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type EthClient_BlockNumber_Call struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClient_Expecter) BlockNumber(ctx interface{}) *EthClient_BlockNumber_Call { - return &EthClient_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} -} - -func (_c *EthClient_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClient_BlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClient_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClient_BlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClient_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClient_BlockNumber_Call { - 
_c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *EthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*coretypes.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *coretypes.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*coretypes.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *coretypes.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClient_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClient_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClient_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClient_HeaderByNumber_Call { - return &EthClient_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *EthClient_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClient_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClient_HeaderByNumber_Call) Return(_a0 *coretypes.Header, _a1 error) *EthClient_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClient_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*coretypes.Header, error)) *EthClient_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// NewEthClient creates a new instance of 
EthClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthClient(t interface { - mock.TestingT - Cleanup(func()) -}) *EthClient { - mock := &EthClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/generic_subscriber.go b/aggsender/mocks/generic_subscriber.go deleted file mode 100644 index 59a276428..000000000 --- a/aggsender/mocks/generic_subscriber.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import mock "github.com/stretchr/testify/mock" - -// GenericSubscriber is an autogenerated mock type for the GenericSubscriber type -type GenericSubscriber[T interface{}] struct { - mock.Mock -} - -type GenericSubscriber_Expecter[T interface{}] struct { - mock *mock.Mock -} - -func (_m *GenericSubscriber[T]) EXPECT() *GenericSubscriber_Expecter[T] { - return &GenericSubscriber_Expecter[T]{mock: &_m.Mock} -} - -// Publish provides a mock function with given fields: data -func (_m *GenericSubscriber[T]) Publish(data T) { - _m.Called(data) -} - -// GenericSubscriber_Publish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Publish' -type GenericSubscriber_Publish_Call[T interface{}] struct { - *mock.Call -} - -// Publish is a helper method to define mock.On call -// - data T -func (_e *GenericSubscriber_Expecter[T]) Publish(data interface{}) *GenericSubscriber_Publish_Call[T] { - return &GenericSubscriber_Publish_Call[T]{Call: _e.mock.On("Publish", data)} -} - -func (_c *GenericSubscriber_Publish_Call[T]) Run(run func(data T)) *GenericSubscriber_Publish_Call[T] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(T)) - }) - return _c -} - -func (_c *GenericSubscriber_Publish_Call[T]) Return() *GenericSubscriber_Publish_Call[T] { - _c.Call.Return() - return _c -} - -func (_c 
*GenericSubscriber_Publish_Call[T]) RunAndReturn(run func(T)) *GenericSubscriber_Publish_Call[T] { - _c.Run(run) - return _c -} - -// Subscribe provides a mock function with given fields: subscriberName -func (_m *GenericSubscriber[T]) Subscribe(subscriberName string) <-chan T { - ret := _m.Called(subscriberName) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 <-chan T - if rf, ok := ret.Get(0).(func(string) <-chan T); ok { - r0 = rf(subscriberName) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan T) - } - } - - return r0 -} - -// GenericSubscriber_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type GenericSubscriber_Subscribe_Call[T interface{}] struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - subscriberName string -func (_e *GenericSubscriber_Expecter[T]) Subscribe(subscriberName interface{}) *GenericSubscriber_Subscribe_Call[T] { - return &GenericSubscriber_Subscribe_Call[T]{Call: _e.mock.On("Subscribe", subscriberName)} -} - -func (_c *GenericSubscriber_Subscribe_Call[T]) Run(run func(subscriberName string)) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *GenericSubscriber_Subscribe_Call[T]) Return(_a0 <-chan T) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Return(_a0) - return _c -} - -func (_c *GenericSubscriber_Subscribe_Call[T]) RunAndReturn(run func(string) <-chan T) *GenericSubscriber_Subscribe_Call[T] { - _c.Call.Return(run) - return _c -} - -// NewGenericSubscriber creates a new instance of GenericSubscriber. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewGenericSubscriber[T interface{}](t interface { - mock.TestingT - Cleanup(func()) -}) *GenericSubscriber[T] { - mock := &GenericSubscriber[T]{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/l1_info_tree_syncer.go b/aggsender/mocks/l1_info_tree_syncer.go deleted file mode 100644 index 70ac97de9..000000000 --- a/aggsender/mocks/l1_info_tree_syncer.go +++ /dev/null @@ -1,217 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L1InfoTreeSyncer is an autogenerated mock type for the L1InfoTreeSyncer type -type L1InfoTreeSyncer struct { - mock.Mock -} - -type L1InfoTreeSyncer_Expecter struct { - mock *mock.Mock -} - -func (_m *L1InfoTreeSyncer) EXPECT() *L1InfoTreeSyncer_Expecter { - return &L1InfoTreeSyncer_Expecter{mock: &_m.Mock} -} - -// GetInfoByGlobalExitRoot provides a mock function with given fields: globalExitRoot -func (_m *L1InfoTreeSyncer) GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(globalExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetInfoByGlobalExitRoot") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(globalExitRoot) - } - if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(globalExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(globalExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByGlobalExitRoot' -type L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call struct { - *mock.Call -} - -// GetInfoByGlobalExitRoot is a helper method to define mock.On call -// - globalExitRoot common.Hash -func (_e *L1InfoTreeSyncer_Expecter) GetInfoByGlobalExitRoot(globalExitRoot interface{}) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - return &L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call{Call: _e.mock.On("GetInfoByGlobalExitRoot", globalExitRoot)} -} - -func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Run(run func(globalExitRoot common.Hash)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreeSyncer_GetInfoByGlobalExitRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot provides a mock function with given fields: ctx, index, root -func (_m *L1InfoTreeSyncer) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx context.Context, index uint32, root common.Hash) (treetypes.Proof, error) { - ret := _m.Called(ctx, index, root) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeMerkleProofFromIndexToRoot") - } - - var r0 treetypes.Proof - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (treetypes.Proof, error)); ok { - return rf(ctx, index, root) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) treetypes.Proof); ok { - r0 = rf(ctx, index, root) - } else { - if ret.Get(0) != 
nil { - r0 = ret.Get(0).(treetypes.Proof) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, index, root) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeMerkleProofFromIndexToRoot' -type L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call struct { - *mock.Call -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -// - root common.Hash -func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeMerkleProofFromIndexToRoot(ctx interface{}, index interface{}, root interface{}) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - return &L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call{Call: _e.mock.On("GetL1InfoTreeMerkleProofFromIndexToRoot", ctx, index, root)} -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Run(run func(ctx context.Context, index uint32, root common.Hash)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) Return(_a0 treetypes.Proof, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (treetypes.Proof, error)) *L1InfoTreeSyncer_GetL1InfoTreeMerkleProofFromIndexToRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetL1InfoTreeRootByIndex provides a mock function with given fields: ctx, index -func (_m *L1InfoTreeSyncer) GetL1InfoTreeRootByIndex(ctx 
context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetL1InfoTreeRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeRootByIndex' -type L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call struct { - *mock.Call -} - -// GetL1InfoTreeRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L1InfoTreeSyncer_Expecter) GetL1InfoTreeRootByIndex(ctx interface{}, index interface{}) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - return &L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call{Call: _e.mock.On("GetL1InfoTreeRootByIndex", ctx, index)} -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L1InfoTreeSyncer_GetL1InfoTreeRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// NewL1InfoTreeSyncer creates a new 
instance of L1InfoTreeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL1InfoTreeSyncer(t interface { - mock.TestingT - Cleanup(func()) -}) *L1InfoTreeSyncer { - mock := &L1InfoTreeSyncer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/l2_bridge_syncer.go b/aggsender/mocks/l2_bridge_syncer.go deleted file mode 100644 index b8eeb0848..000000000 --- a/aggsender/mocks/l2_bridge_syncer.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - bridgesync "github.com/0xPolygon/cdk/bridgesync" - common "github.com/ethereum/go-ethereum/common" - - context "context" - - etherman "github.com/0xPolygon/cdk/etherman" - - mock "github.com/stretchr/testify/mock" - - treetypes "github.com/0xPolygon/cdk/tree/types" -) - -// L2BridgeSyncer is an autogenerated mock type for the L2BridgeSyncer type -type L2BridgeSyncer struct { - mock.Mock -} - -type L2BridgeSyncer_Expecter struct { - mock *mock.Mock -} - -func (_m *L2BridgeSyncer) EXPECT() *L2BridgeSyncer_Expecter { - return &L2BridgeSyncer_Expecter{mock: &_m.Mock} -} - -// BlockFinality provides a mock function with no fields -func (_m *L2BridgeSyncer) BlockFinality() etherman.BlockNumberFinality { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for BlockFinality") - } - - var r0 etherman.BlockNumberFinality - if rf, ok := ret.Get(0).(func() etherman.BlockNumberFinality); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(etherman.BlockNumberFinality) - } - - return r0 -} - -// L2BridgeSyncer_BlockFinality_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockFinality' -type L2BridgeSyncer_BlockFinality_Call struct { - *mock.Call -} - -// BlockFinality is a helper method to define mock.On call -func (_e 
*L2BridgeSyncer_Expecter) BlockFinality() *L2BridgeSyncer_BlockFinality_Call { - return &L2BridgeSyncer_BlockFinality_Call{Call: _e.mock.On("BlockFinality")} -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) Run(run func()) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) Return(_a0 etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncer_BlockFinality_Call) RunAndReturn(run func() etherman.BlockNumberFinality) *L2BridgeSyncer_BlockFinality_Call { - _c.Call.Return(run) - return _c -} - -// GetBlockByLER provides a mock function with given fields: ctx, ler -func (_m *L2BridgeSyncer) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - ret := _m.Called(ctx, ler) - - if len(ret) == 0 { - panic("no return value specified for GetBlockByLER") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint64, error)); ok { - return rf(ctx, ler) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint64); ok { - r0 = rf(ctx, ler) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, ler) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetBlockByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByLER' -type L2BridgeSyncer_GetBlockByLER_Call struct { - *mock.Call -} - -// GetBlockByLER is a helper method to define mock.On call -// - ctx context.Context -// - ler common.Hash -func (_e *L2BridgeSyncer_Expecter) GetBlockByLER(ctx interface{}, ler interface{}) *L2BridgeSyncer_GetBlockByLER_Call { - return &L2BridgeSyncer_GetBlockByLER_Call{Call: _e.mock.On("GetBlockByLER", ctx, ler)} -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) Run(run func(ctx context.Context, 
ler common.Hash)) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetBlockByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (uint64, error)) *L2BridgeSyncer_GetBlockByLER_Call { - _c.Call.Return(run) - return _c -} - -// GetBridgesPublished provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncer) GetBridgesPublished(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Bridge, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetBridgesPublished") - } - - var r0 []bridgesync.Bridge - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Bridge); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Bridge) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetBridgesPublished_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBridgesPublished' -type L2BridgeSyncer_GetBridgesPublished_Call struct { - *mock.Call -} - -// GetBridgesPublished is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncer_Expecter) GetBridgesPublished(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetBridgesPublished_Call { - return 
&L2BridgeSyncer_GetBridgesPublished_Call{Call: _e.mock.On("GetBridgesPublished", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) Return(_a0 []bridgesync.Bridge, _a1 error) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetBridgesPublished_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Bridge, error)) *L2BridgeSyncer_GetBridgesPublished_Call { - _c.Call.Return(run) - return _c -} - -// GetClaims provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *L2BridgeSyncer) GetClaims(ctx context.Context, fromBlock uint64, toBlock uint64) ([]bridgesync.Claim, error) { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetClaims") - } - - var r0 []bridgesync.Claim - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)); ok { - return rf(ctx, fromBlock, toBlock) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []bridgesync.Claim); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]bridgesync.Claim) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64) error); ok { - r1 = rf(ctx, fromBlock, toBlock) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetClaims_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaims' -type L2BridgeSyncer_GetClaims_Call struct { - *mock.Call -} - -// GetClaims is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock 
uint64 -// - toBlock uint64 -func (_e *L2BridgeSyncer_Expecter) GetClaims(ctx interface{}, fromBlock interface{}, toBlock interface{}) *L2BridgeSyncer_GetClaims_Call { - return &L2BridgeSyncer_GetClaims_Call{Call: _e.mock.On("GetClaims", ctx, fromBlock, toBlock)} -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) Return(_a0 []bridgesync.Claim, _a1 error) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetClaims_Call) RunAndReturn(run func(context.Context, uint64, uint64) ([]bridgesync.Claim, error)) *L2BridgeSyncer_GetClaims_Call { - _c.Call.Return(run) - return _c -} - -// GetExitRootByIndex provides a mock function with given fields: ctx, index -func (_m *L2BridgeSyncer) GetExitRootByIndex(ctx context.Context, index uint32) (treetypes.Root, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetExitRootByIndex") - } - - var r0 treetypes.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (treetypes.Root, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) treetypes.Root); ok { - r0 = rf(ctx, index) - } else { - r0 = ret.Get(0).(treetypes.Root) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetExitRootByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExitRootByIndex' -type L2BridgeSyncer_GetExitRootByIndex_Call struct { - *mock.Call -} - -// GetExitRootByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 
-func (_e *L2BridgeSyncer_Expecter) GetExitRootByIndex(ctx interface{}, index interface{}) *L2BridgeSyncer_GetExitRootByIndex_Call { - return &L2BridgeSyncer_GetExitRootByIndex_Call{Call: _e.mock.On("GetExitRootByIndex", ctx, index)} -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) Return(_a0 treetypes.Root, _a1 error) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetExitRootByIndex_Call) RunAndReturn(run func(context.Context, uint32) (treetypes.Root, error)) *L2BridgeSyncer_GetExitRootByIndex_Call { - _c.Call.Return(run) - return _c -} - -// GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *L2BridgeSyncer) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLastProcessedBlock") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2BridgeSyncer_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' -type L2BridgeSyncer_GetLastProcessedBlock_Call struct { - *mock.Call -} - -// GetLastProcessedBlock is a helper method to define mock.On call -// - ctx context.Context -func (_e *L2BridgeSyncer_Expecter) GetLastProcessedBlock(ctx interface{}) *L2BridgeSyncer_GetLastProcessedBlock_Call { - return 
&L2BridgeSyncer_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2BridgeSyncer_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2BridgeSyncer_GetLastProcessedBlock_Call { - _c.Call.Return(run) - return _c -} - -// OriginNetwork provides a mock function with no fields -func (_m *L2BridgeSyncer) OriginNetwork() uint32 { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for OriginNetwork") - } - - var r0 uint32 - if rf, ok := ret.Get(0).(func() uint32); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint32) - } - - return r0 -} - -// L2BridgeSyncer_OriginNetwork_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OriginNetwork' -type L2BridgeSyncer_OriginNetwork_Call struct { - *mock.Call -} - -// OriginNetwork is a helper method to define mock.On call -func (_e *L2BridgeSyncer_Expecter) OriginNetwork() *L2BridgeSyncer_OriginNetwork_Call { - return &L2BridgeSyncer_OriginNetwork_Call{Call: _e.mock.On("OriginNetwork")} -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) Run(run func()) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) Return(_a0 uint32) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2BridgeSyncer_OriginNetwork_Call) RunAndReturn(run func() uint32) *L2BridgeSyncer_OriginNetwork_Call { - _c.Call.Return(run) - return _c -} - -// NewL2BridgeSyncer creates a new instance of 
L2BridgeSyncer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL2BridgeSyncer(t interface { - mock.TestingT - Cleanup(func()) -}) *L2BridgeSyncer { - mock := &L2BridgeSyncer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/mocks/logger.go b/aggsender/mocks/logger.go deleted file mode 100644 index b2a845ca9..000000000 --- a/aggsender/mocks/logger.go +++ /dev/null @@ -1,420 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import mock "github.com/stretchr/testify/mock" - -// Logger is an autogenerated mock type for the Logger type -type Logger struct { - mock.Mock -} - -type Logger_Expecter struct { - mock *mock.Mock -} - -func (_m *Logger) EXPECT() *Logger_Expecter { - return &Logger_Expecter{mock: &_m.Mock} -} - -// Debug provides a mock function with given fields: args -func (_m *Logger) Debug(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Debug_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debug' -type Logger_Debug_Call struct { - *mock.Call -} - -// Debug is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Debug(args ...interface{}) *Logger_Debug_Call { - return &Logger_Debug_Call{Call: _e.mock.On("Debug", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Debug_Call) Run(run func(args ...interface{})) *Logger_Debug_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) 
- }) - return _c -} - -func (_c *Logger_Debug_Call) Return() *Logger_Debug_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Debug_Call) RunAndReturn(run func(...interface{})) *Logger_Debug_Call { - _c.Run(run) - return _c -} - -// Debugf provides a mock function with given fields: format, args -func (_m *Logger) Debugf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Debugf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugf' -type Logger_Debugf_Call struct { - *mock.Call -} - -// Debugf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Debugf(format interface{}, args ...interface{}) *Logger_Debugf_Call { - return &Logger_Debugf_Call{Call: _e.mock.On("Debugf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Debugf_Call) Run(run func(format string, args ...interface{})) *Logger_Debugf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Debugf_Call) Return() *Logger_Debugf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Debugf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Debugf_Call { - _c.Run(run) - return _c -} - -// Error provides a mock function with given fields: args -func (_m *Logger) Error(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// Logger_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error' -type Logger_Error_Call struct { - *mock.Call -} - -// Error is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Error(args ...interface{}) *Logger_Error_Call { - return &Logger_Error_Call{Call: _e.mock.On("Error", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Error_Call) Run(run func(args ...interface{})) *Logger_Error_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Error_Call) Return() *Logger_Error_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Error_Call) RunAndReturn(run func(...interface{})) *Logger_Error_Call { - _c.Run(run) - return _c -} - -// Errorf provides a mock function with given fields: format, args -func (_m *Logger) Errorf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf' -type Logger_Errorf_Call struct { - *mock.Call -} - -// Errorf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Errorf(format interface{}, args ...interface{}) *Logger_Errorf_Call { - return &Logger_Errorf_Call{Call: _e.mock.On("Errorf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Errorf_Call) Run(run func(format string, args ...interface{})) *Logger_Errorf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) 
- }) - return _c -} - -func (_c *Logger_Errorf_Call) Return() *Logger_Errorf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Errorf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Errorf_Call { - _c.Run(run) - return _c -} - -// Fatalf provides a mock function with given fields: format, args -func (_m *Logger) Fatalf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Fatalf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Fatalf' -type Logger_Fatalf_Call struct { - *mock.Call -} - -// Fatalf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Fatalf(format interface{}, args ...interface{}) *Logger_Fatalf_Call { - return &Logger_Fatalf_Call{Call: _e.mock.On("Fatalf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Fatalf_Call) Run(run func(format string, args ...interface{})) *Logger_Fatalf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Fatalf_Call) Return() *Logger_Fatalf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Fatalf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Fatalf_Call { - _c.Run(run) - return _c -} - -// Info provides a mock function with given fields: args -func (_m *Logger) Info(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// Logger_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info' -type Logger_Info_Call struct { - *mock.Call -} - -// Info is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Info(args ...interface{}) *Logger_Info_Call { - return &Logger_Info_Call{Call: _e.mock.On("Info", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Info_Call) Run(run func(args ...interface{})) *Logger_Info_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Info_Call) Return() *Logger_Info_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Info_Call) RunAndReturn(run func(...interface{})) *Logger_Info_Call { - _c.Run(run) - return _c -} - -// Infof provides a mock function with given fields: format, args -func (_m *Logger) Infof(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof' -type Logger_Infof_Call struct { - *mock.Call -} - -// Infof is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Infof(format interface{}, args ...interface{}) *Logger_Infof_Call { - return &Logger_Infof_Call{Call: _e.mock.On("Infof", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Infof_Call) Run(run func(format string, args ...interface{})) *Logger_Infof_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) 
- }) - return _c -} - -func (_c *Logger_Infof_Call) Return() *Logger_Infof_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Infof_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Infof_Call { - _c.Run(run) - return _c -} - -// Warn provides a mock function with given fields: args -func (_m *Logger) Warn(args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, args...) - _m.Called(_ca...) -} - -// Logger_Warn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warn' -type Logger_Warn_Call struct { - *mock.Call -} - -// Warn is a helper method to define mock.On call -// - args ...interface{} -func (_e *Logger_Expecter) Warn(args ...interface{}) *Logger_Warn_Call { - return &Logger_Warn_Call{Call: _e.mock.On("Warn", - append([]interface{}{}, args...)...)} -} - -func (_c *Logger_Warn_Call) Run(run func(args ...interface{})) *Logger_Warn_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-0) - for i, a := range args[0:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(variadicArgs...) - }) - return _c -} - -func (_c *Logger_Warn_Call) Return() *Logger_Warn_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Warn_Call) RunAndReturn(run func(...interface{})) *Logger_Warn_Call { - _c.Run(run) - return _c -} - -// Warnf provides a mock function with given fields: format, args -func (_m *Logger) Warnf(format string, args ...interface{}) { - var _ca []interface{} - _ca = append(_ca, format) - _ca = append(_ca, args...) - _m.Called(_ca...) 
-} - -// Logger_Warnf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warnf' -type Logger_Warnf_Call struct { - *mock.Call -} - -// Warnf is a helper method to define mock.On call -// - format string -// - args ...interface{} -func (_e *Logger_Expecter) Warnf(format interface{}, args ...interface{}) *Logger_Warnf_Call { - return &Logger_Warnf_Call{Call: _e.mock.On("Warnf", - append([]interface{}{format}, args...)...)} -} - -func (_c *Logger_Warnf_Call) Run(run func(format string, args ...interface{})) *Logger_Warnf_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]interface{}, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(interface{}) - } - } - run(args[0].(string), variadicArgs...) - }) - return _c -} - -func (_c *Logger_Warnf_Call) Return() *Logger_Warnf_Call { - _c.Call.Return() - return _c -} - -func (_c *Logger_Warnf_Call) RunAndReturn(run func(string, ...interface{})) *Logger_Warnf_Call { - _c.Run(run) - return _c -} - -// NewLogger creates a new instance of Logger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewLogger(t interface { - mock.TestingT - Cleanup(func()) -}) *Logger { - mock := &Logger{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/aggsender/rpc/aggsender_rpc.go b/aggsender/rpc/aggsender_rpc.go deleted file mode 100644 index 6c0b69869..000000000 --- a/aggsender/rpc/aggsender_rpc.go +++ /dev/null @@ -1,79 +0,0 @@ -package aggsenderrpc - -import ( - "fmt" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/log" -) - -const ( - base10 = 10 -) - -type aggsenderStorer interface { - GetCertificateByHeight(height uint64) (*types.CertificateInfo, error) - GetLastSentCertificate() (*types.CertificateInfo, error) -} - -type aggsenderInterface interface { - Info() types.AggsenderInfo -} - -// AggsenderRPC is the RPC interface for the aggsender -type AggsenderRPC struct { - logger *log.Logger - storage aggsenderStorer - aggsender aggsenderInterface -} - -func NewAggsenderRPC( - logger *log.Logger, - storage aggsenderStorer, - aggsender aggsenderInterface, -) *AggsenderRPC { - return &AggsenderRPC{ - logger: logger, - storage: storage, - aggsender: aggsender, - } -} - -// Status returns the status of the aggsender -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_status", "params":[], "id":1}' -func (b *AggsenderRPC) Status() (interface{}, rpc.Error) { - info := b.aggsender.Info() - return info, nil -} - -// GetCertificateHeaderPerHeight returns the certificate header for the given height -// if param is `nil` it returns the last sent certificate -// latest: -// -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_getCertificateHeaderPerHeight", "params":[], "id":1}' -// -// specific height: -// -// curl -X POST http://localhost:5576/ -H "Con -application/json" \ -// -d '{"method":"aggsender_getCertificateHeaderPerHeight", "params":[$height], "id":1}' 
-func (b *AggsenderRPC) GetCertificateHeaderPerHeight(height *uint64) (interface{}, rpc.Error) { - var ( - certInfo *types.CertificateInfo - err error - ) - if height == nil { - certInfo, err = b.storage.GetLastSentCertificate() - } else { - certInfo, err = b.storage.GetCertificateByHeight(*height) - } - if err != nil { - return nil, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error getting certificate by height: %v", err)) - } - if certInfo == nil { - return nil, rpc.NewRPCError(rpc.NotFoundErrorCode, "certificate not found") - } - return certInfo, nil -} diff --git a/aggsender/rpc/aggsender_rpc_test.go b/aggsender/rpc/aggsender_rpc_test.go deleted file mode 100644 index 38e73ed07..000000000 --- a/aggsender/rpc/aggsender_rpc_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package aggsenderrpc - -import ( - "fmt" - "testing" - - "github.com/0xPolygon/cdk/aggsender/mocks" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/stretchr/testify/require" -) - -func TestAggsenderRPCStatus(t *testing.T) { - testData := newAggsenderData(t) - testData.mockAggsender.EXPECT().Info().Return(types.AggsenderInfo{}) - res, err := testData.sut.Status() - require.NoError(t, err) - require.NotNil(t, res) -} - -func TestAggsenderRPCGetCertificateHeaderPerHeight(t *testing.T) { - testData := newAggsenderData(t) - height := uint64(1) - cases := []struct { - name string - height *uint64 - certResult *types.CertificateInfo - certError error - expectedError string - expectedNil bool - }{ - { - name: "latest, no error", - certResult: &types.CertificateInfo{}, - certError: nil, - }, - { - name: "latest,no error, no cert", - certResult: nil, - certError: nil, - expectedError: "not found", - expectedNil: true, - }, - { - name: "latest,error", - certResult: &types.CertificateInfo{}, - certError: fmt.Errorf("my_error"), - expectedError: "my_error", - expectedNil: true, - }, - { - name: "hight, no error", - height: &height, - certResult: &types.CertificateInfo{}, - certError: nil, - }, - } - 
- for _, tt := range cases { - t.Run(tt.name, func(t *testing.T) { - if tt.height == nil { - testData.mockStore.EXPECT().GetLastSentCertificate().Return(tt.certResult, tt.certError).Once() - } else { - testData.mockStore.EXPECT().GetCertificateByHeight(*tt.height).Return(tt.certResult, tt.certError).Once() - } - res, err := testData.sut.GetCertificateHeaderPerHeight(tt.height) - if tt.expectedError != "" { - require.Error(t, err) - require.Contains(t, err.Error(), tt.expectedError) - } else { - require.NoError(t, err) - } - if tt.expectedNil { - require.Nil(t, res) - } else { - require.NotNil(t, res) - } - }) - } -} - -type aggsenderRPCTestData struct { - sut *AggsenderRPC - mockStore *mocks.AggsenderStorer - mockAggsender *mocks.AggsenderInterface -} - -func newAggsenderData(t *testing.T) *aggsenderRPCTestData { - t.Helper() - mockStore := mocks.NewAggsenderStorer(t) - mockAggsender := mocks.NewAggsenderInterface(t) - sut := NewAggsenderRPC(nil, mockStore, mockAggsender) - return &aggsenderRPCTestData{sut, mockStore, mockAggsender} -} diff --git a/aggsender/rpcclient/client.go b/aggsender/rpcclient/client.go deleted file mode 100644 index 7d1312fb1..000000000 --- a/aggsender/rpcclient/client.go +++ /dev/null @@ -1,58 +0,0 @@ -package rpcclient - -import ( - "encoding/json" - "fmt" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" -) - -var jSONRPCCall = rpc.JSONRPCCall - -// Client wraps all the available endpoints of the data abailability committee node server -type Client struct { - url string -} - -func NewClient(url string) *Client { - return &Client{ - url: url, - } -} - -func (c *Client) GetStatus() (*types.AggsenderInfo, error) { - response, err := jSONRPCCall(c.url, "aggsender_status") - if err != nil { - return nil, err - } - - // Check if the response is an error - if response.Error != nil { - return nil, fmt.Errorf("error in the response calling aggsender_status: %v", response.Error) - } - result := 
types.AggsenderInfo{} - err = json.Unmarshal(response.Result, &result) - if err != nil { - return nil, err - } - return &result, nil -} - -func (c *Client) GetCertificateHeaderPerHeight(height *uint64) (*types.CertificateInfo, error) { - response, err := jSONRPCCall(c.url, "aggsender_getCertificateHeaderPerHeight", height) - if err != nil { - return nil, err - } - - // Check if the response is an error - if response.Error != nil { - return nil, fmt.Errorf("error in the response calling aggsender_getCertificateHeaderPerHeight: %v", response.Error) - } - cert := types.CertificateInfo{} - err = json.Unmarshal(response.Result, &cert) - if err != nil { - return nil, err - } - return &cert, nil -} diff --git a/aggsender/rpcclient/client_test.go b/aggsender/rpcclient/client_test.go deleted file mode 100644 index f831713e5..000000000 --- a/aggsender/rpcclient/client_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package rpcclient - -import ( - "encoding/json" - "testing" - - "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/stretchr/testify/require" -) - -func TestGetCertificateHeaderPerHeight(t *testing.T) { - sut := NewClient("url") - height := uint64(1) - responseCert := types.CertificateInfo{} - responseCertJSON, err := json.Marshal(responseCert) - require.NoError(t, err) - response := rpc.Response{ - Result: responseCertJSON, - } - jSONRPCCall = func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - cert, err := sut.GetCertificateHeaderPerHeight(&height) - require.NoError(t, err) - require.NotNil(t, cert) - require.Equal(t, responseCert, *cert) -} - -func TestGetStatus(t *testing.T) { - sut := NewClient("url") - responseData := types.AggsenderInfo{} - responseDataJSON, err := json.Marshal(responseData) - require.NoError(t, err) - response := rpc.Response{ - Result: responseDataJSON, - } - jSONRPCCall = func(_, _ string, _ ...interface{}) (rpc.Response, error) { - return response, nil - } - result, 
err := sut.GetStatus() - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, responseData, *result) -} diff --git a/aggsender/types/block_notifier.go b/aggsender/types/block_notifier.go deleted file mode 100644 index 5dde27028..000000000 --- a/aggsender/types/block_notifier.go +++ /dev/null @@ -1,20 +0,0 @@ -package types - -import ( - "time" - - "github.com/0xPolygon/cdk/etherman" -) - -type EventNewBlock struct { - BlockNumber uint64 - BlockFinalityType etherman.BlockNumberFinality - BlockRate time.Duration -} - -// BlockNotifier is the interface that wraps the basic methods to notify a new block. -type BlockNotifier interface { - // NotifyEpochStarted notifies the epoch has started. - Subscribe(id string) <-chan EventNewBlock - String() string -} diff --git a/aggsender/types/certificate_build_params.go b/aggsender/types/certificate_build_params.go deleted file mode 100644 index 1ffd7563e..000000000 --- a/aggsender/types/certificate_build_params.go +++ /dev/null @@ -1,112 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/0xPolygon/cdk/bridgesync" -) - -const ( - EstimatedSizeBridgeExit = 230 - EstimatedSizeClaim = 8000 - byteArrayJSONSizeFactor = 1.5 -) - -// CertificateBuildParams is a struct that holds the parameters to build a certificate -type CertificateBuildParams struct { - FromBlock uint64 - ToBlock uint64 - Bridges []bridgesync.Bridge - Claims []bridgesync.Claim - CreatedAt uint32 -} - -func (c *CertificateBuildParams) String() string { - return fmt.Sprintf("FromBlock: %d, ToBlock: %d, numBridges: %d, numClaims: %d, createdAt: %d", - c.FromBlock, c.ToBlock, c.NumberOfBridges(), c.NumberOfClaims(), c.CreatedAt) -} - -// Range create a new CertificateBuildParams with the given range -func (c *CertificateBuildParams) Range(fromBlock, toBlock uint64) (*CertificateBuildParams, error) { - if c.FromBlock == fromBlock && c.ToBlock == toBlock { - return c, nil - } - if c.FromBlock > fromBlock || c.ToBlock < toBlock { - return nil, 
fmt.Errorf("invalid range") - } - newCert := &CertificateBuildParams{ - FromBlock: fromBlock, - ToBlock: toBlock, - Bridges: make([]bridgesync.Bridge, 0), - Claims: make([]bridgesync.Claim, 0), - } - - for _, bridge := range c.Bridges { - if bridge.BlockNum >= fromBlock && bridge.BlockNum <= toBlock { - newCert.Bridges = append(newCert.Bridges, bridge) - } - } - - for _, claim := range c.Claims { - if claim.BlockNum >= fromBlock && claim.BlockNum <= toBlock { - newCert.Claims = append(newCert.Claims, claim) - } - } - return newCert, nil -} - -// NumberOfBridges returns the number of bridges in the certificate -func (c *CertificateBuildParams) NumberOfBridges() int { - if c == nil { - return 0 - } - return len(c.Bridges) -} - -// NumberOfClaims returns the number of claims in the certificate -func (c *CertificateBuildParams) NumberOfClaims() int { - if c == nil { - return 0 - } - return len(c.Claims) -} - -// NumberOfBlocks returns the number of blocks in the certificate -func (c *CertificateBuildParams) NumberOfBlocks() int { - if c == nil { - return 0 - } - return int(c.ToBlock - c.FromBlock + 1) -} - -// EstimatedSize returns the estimated size of the certificate -func (c *CertificateBuildParams) EstimatedSize() uint { - if c == nil { - return 0 - } - sizeBridges := int(0) - for _, bridge := range c.Bridges { - sizeBridges += EstimatedSizeBridgeExit - sizeBridges += int(byteArrayJSONSizeFactor * float32(len(bridge.Metadata))) - } - - sizeClaims := int(0) - for _, claim := range c.Claims { - sizeClaims += EstimatedSizeClaim - sizeClaims += int(byteArrayJSONSizeFactor * float32(len(claim.Metadata))) - } - return uint(sizeBridges + sizeClaims) -} - -// IsEmpty returns true if the certificate is empty -func (c *CertificateBuildParams) IsEmpty() bool { - return c.NumberOfBridges() == 0 && c.NumberOfClaims() == 0 -} - -// MaxDepoitCount returns the maximum deposit count in the certificate -func (c *CertificateBuildParams) MaxDepositCount() uint32 { - if c == nil || 
c.NumberOfBridges() == 0 { - return 0 - } - return c.Bridges[len(c.Bridges)-1].DepositCount -} diff --git a/aggsender/types/epoch_notifier.go b/aggsender/types/epoch_notifier.go deleted file mode 100644 index 426ad3622..000000000 --- a/aggsender/types/epoch_notifier.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -import ( - "context" - "fmt" -) - -// EpochEvent is the event that notifies the neear end epoch -type EpochEvent struct { - Epoch uint64 - // ExtraInfo if a detailed information about the epoch that depends on implementation - ExtraInfo fmt.Stringer -} - -func (e EpochEvent) String() string { - return fmt.Sprintf("EpochEvent: epoch=%d extra=%s", e.Epoch, e.ExtraInfo) -} - -type EpochNotifier interface { - // NotifyEpochStarted notifies the epoch is close to end. - Subscribe(id string) <-chan EpochEvent - // Start starts the notifier synchronously - Start(ctx context.Context) - String() string -} diff --git a/aggsender/types/generic_subscriber.go b/aggsender/types/generic_subscriber.go deleted file mode 100644 index 67038c5ce..000000000 --- a/aggsender/types/generic_subscriber.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -type GenericSubscriber[T any] interface { - Subscribe(subscriberName string) <-chan T - Publish(data T) -} diff --git a/aggsender/types/status.go b/aggsender/types/status.go deleted file mode 100644 index be28fe112..000000000 --- a/aggsender/types/status.go +++ /dev/null @@ -1,42 +0,0 @@ -package types - -import ( - "time" - - zkevm "github.com/0xPolygon/cdk" -) - -type AggsenderStatusType string - -const ( - StatusNone AggsenderStatusType = "none" - StatusCheckingInitialStage AggsenderStatusType = "checking_initial_stage" - StatusCertificateStage AggsenderStatusType = "certificate_stage" -) - -type AggsenderStatus struct { - Running bool `json:"running"` - StartTime time.Time `json:"start_time"` - Status AggsenderStatusType `json:"status"` - LastError string `json:"last_error"` -} - -type AggsenderInfo struct { - AggsenderStatus 
AggsenderStatus `json:"aggsender_status"` - Version zkevm.FullVersion - EpochNotifierDescription string `json:"epoch_notifier_description"` - NetworkID uint32 `json:"network_id"` -} - -func (a *AggsenderStatus) Start(startTime time.Time) { - a.Running = true - a.StartTime = startTime -} - -func (a *AggsenderStatus) SetLastError(err error) { - if err == nil { - a.LastError = "" - } else { - a.LastError = err.Error() - } -} diff --git a/aggsender/types/status_test.go b/aggsender/types/status_test.go deleted file mode 100644 index d48ca0043..000000000 --- a/aggsender/types/status_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAggsenderStatusSetLastError(t *testing.T) { - sut := AggsenderStatus{} - sut.SetLastError(nil) - require.Equal(t, "", sut.LastError) - sut.SetLastError(errors.New("error")) - require.Equal(t, "error", sut.LastError) -} diff --git a/aggsender/types/types.go b/aggsender/types/types.go deleted file mode 100644 index 6d9f75349..000000000 --- a/aggsender/types/types.go +++ /dev/null @@ -1,195 +0,0 @@ -package types - -import ( - "context" - "encoding/binary" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -// L1InfoTreeSyncer is an interface defining functions that an L1InfoTreeSyncer should implement -type L1InfoTreeSyncer interface { - GetInfoByGlobalExitRoot(globalExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) - GetL1InfoTreeMerkleProofFromIndexToRoot( - ctx context.Context, index uint32, root common.Hash, - ) (treeTypes.Proof, error) - GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) -} - -// L2BridgeSyncer is an 
interface defining functions that an L2BridgeSyncer should implement -type L2BridgeSyncer interface { - GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) - GetExitRootByIndex(ctx context.Context, index uint32) (treeTypes.Root, error) - GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Bridge, error) - GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]bridgesync.Claim, error) - OriginNetwork() uint32 - BlockFinality() etherman.BlockNumberFinality - GetLastProcessedBlock(ctx context.Context) (uint64, error) -} - -// EthClient is an interface defining functions that an EthClient should implement -type EthClient interface { - BlockNumber(ctx context.Context) (uint64, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) -} - -// Logger is an interface that defines the methods to log messages -type Logger interface { - Fatalf(format string, args ...interface{}) - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) -} - -type CertificateInfo struct { - Height uint64 `meddler:"height"` - RetryCount int `meddler:"retry_count"` - CertificateID common.Hash `meddler:"certificate_id,hash"` - // PreviousLocalExitRoot if it's nil means no reported - PreviousLocalExitRoot *common.Hash `meddler:"previous_local_exit_root,hash"` - NewLocalExitRoot common.Hash `meddler:"new_local_exit_root,hash"` - FromBlock uint64 `meddler:"from_block"` - ToBlock uint64 `meddler:"to_block"` - Status agglayer.CertificateStatus `meddler:"status"` - CreatedAt uint32 `meddler:"created_at"` - UpdatedAt uint32 `meddler:"updated_at"` - SignedCertificate string `meddler:"signed_certificate"` -} - -func (c *CertificateInfo) String() string { - if c == nil { - //nolint:all - return 
"nil" - } - previousLocalExitRoot := "nil" - if c.PreviousLocalExitRoot != nil { - previousLocalExitRoot = c.PreviousLocalExitRoot.String() - } - return fmt.Sprintf("aggsender.CertificateInfo: "+ - "Height: %d "+ - "RetryCount: %d "+ - "CertificateID: %s "+ - "PreviousLocalExitRoot: %s "+ - "NewLocalExitRoot: %s "+ - "Status: %s "+ - "FromBlock: %d "+ - "ToBlock: %d "+ - "CreatedAt: %s "+ - "UpdatedAt: %s", - c.Height, - c.RetryCount, - c.CertificateID.String(), - previousLocalExitRoot, - c.NewLocalExitRoot.String(), - c.Status.String(), - c.FromBlock, - c.ToBlock, - time.Unix(int64(c.CreatedAt), 0), - time.Unix(int64(c.UpdatedAt), 0), - ) -} - -// ID returns a string with the unique identifier of the cerificate (height+certificateID) -func (c *CertificateInfo) ID() string { - if c == nil { - return "nil" - } - return fmt.Sprintf("%d/%s (retry %d)", c.Height, c.CertificateID.String(), c.RetryCount) -} - -// IsClosed returns true if the certificate is closed (settled or inError) -func (c *CertificateInfo) IsClosed() bool { - if c == nil { - return false - } - return c.Status.IsClosed() -} - -// ElapsedTimeSinceCreation returns the time elapsed since the certificate was created -func (c *CertificateInfo) ElapsedTimeSinceCreation() time.Duration { - if c == nil { - return 0 - } - return time.Now().UTC().Sub(time.Unix(int64(c.CreatedAt), 0)) -} - -type CertificateMetadata struct { - // ToBlock contains the pre v1 value stored in the metadata certificate field - // is not stored in the hash post v1 - ToBlock uint64 - - // FromBlock is the block number from which the certificate contains data - FromBlock uint64 - - // Offset is the number of blocks from the FromBlock that the certificate contains - Offset uint32 - - // CreatedAt is the timestamp when the certificate was created - CreatedAt uint32 - - // Version is the version of the metadata - Version uint8 -} - -// NewCertificateMetadataFromHash returns a new CertificateMetadata from the given hash -func 
NewCertificateMetadata(fromBlock uint64, offset uint32, createdAt uint32) *CertificateMetadata { - return &CertificateMetadata{ - FromBlock: fromBlock, - Offset: offset, - CreatedAt: createdAt, - Version: 1, - } -} - -// NewCertificateMetadataFromHash returns a new CertificateMetadata from the given hash -func NewCertificateMetadataFromHash(hash common.Hash) *CertificateMetadata { - b := hash.Bytes() - - if b[0] < 1 { - return &CertificateMetadata{ - ToBlock: hash.Big().Uint64(), - } - } - - return &CertificateMetadata{ - Version: b[0], - FromBlock: binary.BigEndian.Uint64(b[1:9]), - Offset: binary.BigEndian.Uint32(b[9:13]), - CreatedAt: binary.BigEndian.Uint32(b[13:17]), - } -} - -// ToHash returns the hash of the metadata -func (c *CertificateMetadata) ToHash() common.Hash { - b := make([]byte, common.HashLength) // 32-byte hash - - // Encode version - b[0] = c.Version - - // Encode fromBlock - binary.BigEndian.PutUint64(b[1:9], c.FromBlock) - - // Encode offset - binary.BigEndian.PutUint32(b[9:13], c.Offset) - - // Encode createdAt - binary.BigEndian.PutUint32(b[13:17], c.CreatedAt) - - // Last 8 bytes remain as zero padding - - return common.BytesToHash(b) -} diff --git a/aggsender/types/types_test.go b/aggsender/types/types_test.go deleted file mode 100644 index 985127f9b..000000000 --- a/aggsender/types/types_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package types - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestMetadataConversions_toBlock_Only(t *testing.T) { - toBlock := uint64(123567890) - hash := common.BigToHash(new(big.Int).SetUint64(toBlock)) - meta := NewCertificateMetadataFromHash(hash) - require.Equal(t, toBlock, meta.ToBlock) -} - -func TestMetadataConversions(t *testing.T) { - fromBlock := uint64(123567890) - offset := uint32(1000) - createdAt := uint32(0) - meta := NewCertificateMetadata(fromBlock, offset, createdAt) - c := meta.ToHash() - extractBlock := 
NewCertificateMetadataFromHash(c) - require.Equal(t, fromBlock, extractBlock.FromBlock) - require.Equal(t, offset, extractBlock.Offset) - require.Equal(t, createdAt, extractBlock.CreatedAt) -} diff --git a/bridgesync/bridgesync.go b/bridgesync/bridgesync.go deleted file mode 100644 index 4f6f1e903..000000000 --- a/bridgesync/bridgesync.go +++ /dev/null @@ -1,286 +0,0 @@ -package bridgesync - -import ( - "context" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -const ( - bridgeSyncL1 = "BridgeSyncL1" - bridgeSyncL2 = "BridgeSyncL2" - downloadBufferSize = 1000 -) - -type ReorgDetector interface { - sync.ReorgDetector -} - -// BridgeSync manages the state of the exit tree for the bridge contract by processing Ethereum blockchain events. -type BridgeSync struct { - processor *processor - driver *sync.EVMDriver - - originNetwork uint32 - blockFinality etherman.BlockNumberFinality -} - -// NewL1 creates a bridge syncer that synchronizes the mainnet exit tree -func NewL1( - ctx context.Context, - dbPath string, - bridge common.Address, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, - rd ReorgDetector, - ethClient EthClienter, - initialBlock uint64, - waitForNewBlocksPeriod time.Duration, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - originNetwork uint32, - syncFullClaims bool, - finalizedBlockType etherman.BlockNumberFinality, -) (*BridgeSync, error) { - return newBridgeSync( - ctx, - dbPath, - bridge, - syncBlockChunkSize, - blockFinalityType, - rd, - ethClient, - initialBlock, - bridgeSyncL1, - waitForNewBlocksPeriod, - retryAfterErrorPeriod, - maxRetryAttemptsAfterError, - originNetwork, - syncFullClaims, - finalizedBlockType, - ) -} - -// NewL2 creates a bridge syncer that synchronizes the local exit tree -func NewL2( - ctx context.Context, - dbPath 
string, - bridge common.Address, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, - rd ReorgDetector, - ethClient EthClienter, - initialBlock uint64, - waitForNewBlocksPeriod time.Duration, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - originNetwork uint32, - syncFullClaims bool, - finalizedBlockType etherman.BlockNumberFinality, -) (*BridgeSync, error) { - return newBridgeSync( - ctx, - dbPath, - bridge, - syncBlockChunkSize, - blockFinalityType, - rd, - ethClient, - initialBlock, - bridgeSyncL2, - waitForNewBlocksPeriod, - retryAfterErrorPeriod, - maxRetryAttemptsAfterError, - originNetwork, - syncFullClaims, - finalizedBlockType, - ) -} - -func newBridgeSync( - ctx context.Context, - dbPath string, - bridge common.Address, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, - rd ReorgDetector, - ethClient EthClienter, - initialBlock uint64, - layerID string, - waitForNewBlocksPeriod time.Duration, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - originNetwork uint32, - syncFullClaims bool, - finalizedBlockType etherman.BlockNumberFinality, -) (*BridgeSync, error) { - logger := log.WithFields("bridge-syncer", layerID) - processor, err := newProcessor(dbPath, logger) - if err != nil { - return nil, err - } - - lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) - if err != nil { - return nil, err - } - - if lastProcessedBlock < initialBlock { - err = processor.ProcessBlock(ctx, sync.Block{ - Num: initialBlock, - }) - if err != nil { - return nil, err - } - } - rh := &sync.RetryHandler{ - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - RetryAfterErrorPeriod: retryAfterErrorPeriod, - } - - appender, err := buildAppender(ethClient, bridge, syncFullClaims) - if err != nil { - return nil, err - } - downloader, err := sync.NewEVMDownloader( - layerID, - ethClient, - syncBlockChunkSize, - blockFinalityType, - waitForNewBlocksPeriod, - 
appender, - []common.Address{bridge}, - rh, - finalizedBlockType, - ) - if err != nil { - return nil, err - } - - driver, err := sync.NewEVMDriver(rd, processor, downloader, layerID, downloadBufferSize, rh) - if err != nil { - return nil, err - } - - logger.Infof( - "BridgeSyncer [%s] created:\n"+ - " dbPath: %s\n"+ - " initialBlock: %d\n"+ - " bridgeAddr: %s\n"+ - " syncFullClaims: %t\n"+ - " maxRetryAttemptsAfterError: %d\n"+ - " retryAfterErrorPeriod: %s\n"+ - " syncBlockChunkSize: %d\n"+ - " blockFinalityType: %s\n"+ - " waitForNewBlocksPeriod: %s", - layerID, - dbPath, - initialBlock, - bridge.String(), - syncFullClaims, - maxRetryAttemptsAfterError, - retryAfterErrorPeriod.String(), - syncBlockChunkSize, - blockFinalityType, - waitForNewBlocksPeriod.String(), - ) - - return &BridgeSync{ - processor: processor, - driver: driver, - originNetwork: originNetwork, - blockFinality: blockFinalityType, - }, nil -} - -// Start starts the synchronization process -func (s *BridgeSync) Start(ctx context.Context) { - s.driver.Sync(ctx) -} - -func (s *BridgeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - if s.processor.isHalted() { - return 0, sync.ErrInconsistentState - } - return s.processor.GetLastProcessedBlock(ctx) -} - -func (s *BridgeSync) GetBridgeRootByHash(ctx context.Context, root common.Hash) (*tree.Root, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.exitTree.GetRootByHash(ctx, root) -} - -func (s *BridgeSync) GetClaims(ctx context.Context, fromBlock, toBlock uint64) ([]Claim, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetClaims(ctx, fromBlock, toBlock) -} - -func (s *BridgeSync) GetBridges(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetBridges(ctx, fromBlock, toBlock) -} - -func (s *BridgeSync) 
GetBridgesPublished(ctx context.Context, fromBlock, toBlock uint64) ([]Bridge, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetBridgesPublished(ctx, fromBlock, toBlock) -} - -func (s *BridgeSync) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) { - if s.processor.isHalted() { - return tree.Proof{}, sync.ErrInconsistentState - } - return s.processor.exitTree.GetProof(ctx, depositCount, localExitRoot) -} - -func (s *BridgeSync) GetBlockByLER(ctx context.Context, ler common.Hash) (uint64, error) { - if s.processor.isHalted() { - return 0, sync.ErrInconsistentState - } - root, err := s.processor.exitTree.GetRootByHash(ctx, ler) - if err != nil { - return 0, err - } - return root.BlockNum, nil -} - -func (s *BridgeSync) GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - root, err := s.processor.exitTree.GetRootByHash(ctx, ler) - if err != nil { - return root, err - } - return root, nil -} - -// GetExitRootByIndex returns the root of the exit tree at the moment the leaf with the given index was added -func (s *BridgeSync) GetExitRootByIndex(ctx context.Context, index uint32) (tree.Root, error) { - if s.processor.isHalted() { - return tree.Root{}, sync.ErrInconsistentState - } - return s.processor.exitTree.GetRootByIndex(ctx, index) -} - -// OriginNetwork returns the network ID of the origin chain -func (s *BridgeSync) OriginNetwork() uint32 { - return s.originNetwork -} - -// BlockFinality returns the block finality type -func (s *BridgeSync) BlockFinality() etherman.BlockNumberFinality { - return s.blockFinality -} diff --git a/bridgesync/bridgesync_test.go b/bridgesync/bridgesync_test.go deleted file mode 100644 index 838967ebd..000000000 --- a/bridgesync/bridgesync_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package bridgesync - -import ( - "context" - "errors" - 
"path" - "testing" - "time" - - mocksbridgesync "github.com/0xPolygon/cdk/bridgesync/mocks" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -// Mock implementations for the interfaces -type MockEthClienter struct { - mock.Mock -} - -type MockBridgeContractor struct { - mock.Mock -} - -func TestNewLx(t *testing.T) { - ctx := context.Background() - dbPath := path.Join(t.TempDir(), "TestNewLx.sqlite") - bridge := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - syncBlockChunkSize := uint64(100) - blockFinalityType := etherman.SafeBlock - initialBlock := uint64(0) - waitForNewBlocksPeriod := time.Second * 10 - retryAfterErrorPeriod := time.Second * 5 - maxRetryAttemptsAfterError := 3 - originNetwork := uint32(1) - - mockEthClient := mocksbridgesync.NewEthClienter(t) - mockReorgDetector := mocksbridgesync.NewReorgDetector(t) - - mockReorgDetector.EXPECT().Subscribe(mock.Anything).Return(nil, nil) - - bridgeSync, err := NewL1( - ctx, - dbPath, - bridge, - syncBlockChunkSize, - blockFinalityType, - mockReorgDetector, - mockEthClient, - initialBlock, - waitForNewBlocksPeriod, - retryAfterErrorPeriod, - maxRetryAttemptsAfterError, - originNetwork, - false, - blockFinalityType, - ) - - assert.NoError(t, err) - assert.NotNil(t, bridgeSync) - assert.Equal(t, originNetwork, bridgeSync.OriginNetwork()) - assert.Equal(t, blockFinalityType, bridgeSync.BlockFinality()) - - bridgeSyncL2, err := NewL2( - ctx, - dbPath, - bridge, - syncBlockChunkSize, - blockFinalityType, - mockReorgDetector, - mockEthClient, - initialBlock, - waitForNewBlocksPeriod, - retryAfterErrorPeriod, - maxRetryAttemptsAfterError, - originNetwork, - false, - blockFinalityType, - ) - - assert.NoError(t, err) - assert.NotNil(t, bridgeSync) - assert.Equal(t, originNetwork, bridgeSyncL2.OriginNetwork()) - 
assert.Equal(t, blockFinalityType, bridgeSyncL2.BlockFinality()) -} - -func TestGetLastProcessedBlock(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetLastProcessedBlock(context.Background()) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetBridgeRootByHash(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetBridgeRootByHash(context.Background(), common.Hash{}) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetBridges(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetBridges(context.Background(), 0, 0) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetProof(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetProof(context.Background(), 0, common.Hash{}) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetBlockByLER(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetBlockByLER(context.Background(), common.Hash{}) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetRootByLER(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetRootByLER(context.Background(), common.Hash{}) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetExitRootByIndex(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetExitRootByIndex(context.Background(), 0) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetClaims(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := s.GetClaims(context.Background(), 0, 0) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetBridgesPublishedTopLevel(t *testing.T) { - s := BridgeSync{processor: &processor{halted: true}} - _, err := 
s.GetBridgesPublished(context.Background(), 0, 0) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} diff --git a/bridgesync/claimcalldata_test.go b/bridgesync/claimcalldata_test.go deleted file mode 100644 index ef2d60bdd..000000000 --- a/bridgesync/claimcalldata_test.go +++ /dev/null @@ -1,1942 +0,0 @@ -package bridgesync - -import ( - "context" - "math/big" - "os/exec" - "testing" - "time" - - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/test/contracts/claimmock" - "github.com/0xPolygon/cdk/test/contracts/claimmockcaller" - "github.com/0xPolygon/cdk/test/contracts/claimmocktest" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/stretchr/testify/require" -) - -type testCase struct { - description string - bridgeAddr common.Address - log types.Log - expectedClaim Claim -} - -func TestClaimCalldata(t *testing.T) { - testCases := []testCase{} - // Setup Docker L1 - log.Debug("starting docker") - ctx := context.Background() - msg, err := exec.Command("bash", "-l", "-c", "docker compose up -d").CombinedOutput() - require.NoError(t, err, string(msg)) - time.Sleep(time.Second * 1) - defer func() { - msg, err = exec.Command("bash", "-l", "-c", "docker compose down").CombinedOutput() - require.NoError(t, err, string(msg)) - }() - log.Debug("docker started") - client, err := ethclient.Dial("http://localhost:8545") - require.NoError(t, err) - privateKey, err := crypto.HexToECDSA("ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80") - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(privateKey, big.NewInt(0).SetUint64(1337)) - require.NoError(t, err) - - // Deploy contracts - bridgeAddr, _, bridgeContract, err := claimmock.DeployClaimmock(auth, client) - require.NoError(t, 
err) - claimCallerAddr, _, claimCaller, err := claimmockcaller.DeployClaimmockcaller(auth, client, bridgeAddr) - require.NoError(t, err) - _, _, claimTest, err := claimmocktest.DeployClaimmocktest(auth, client, bridgeAddr, claimCallerAddr) - require.NoError(t, err) - - proofLocal := [32][32]byte{} - proofLocalH := tree.Proof{} - proofLocal[5] = common.HexToHash("beef") - proofLocalH[5] = common.HexToHash("beef") - proofRollup := [32][32]byte{} - proofRollupH := tree.Proof{} - proofRollup[4] = common.HexToHash("a1fa") - proofRollupH[4] = common.HexToHash("a1fa") - expectedClaim := Claim{ - OriginNetwork: 69, - OriginAddress: common.HexToAddress("ffaaffaa"), - DestinationAddress: common.HexToAddress("123456789"), - Amount: big.NewInt(3), - MainnetExitRoot: common.HexToHash("5ca1e"), - RollupExitRoot: common.HexToHash("dead"), - ProofLocalExitRoot: proofLocalH, - ProofRollupExitRoot: proofRollupH, - DestinationNetwork: 0, - Metadata: []byte{}, - GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), - } - expectedClaim2 := Claim{ - OriginNetwork: 87, - OriginAddress: common.HexToAddress("eebbeebb"), - DestinationAddress: common.HexToAddress("2233445566"), - Amount: big.NewInt(4), - MainnetExitRoot: common.HexToHash("5ca1e"), - RollupExitRoot: common.HexToHash("dead"), - ProofLocalExitRoot: proofLocalH, - ProofRollupExitRoot: proofRollupH, - DestinationNetwork: 0, - Metadata: []byte{}, - GlobalExitRoot: crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), - } - expectedClaim3 := Claim{ - OriginNetwork: 69, - OriginAddress: common.HexToAddress("ffaaffaa"), - DestinationAddress: common.HexToAddress("2233445566"), - Amount: big.NewInt(5), - MainnetExitRoot: common.HexToHash("5ca1e"), - RollupExitRoot: common.HexToHash("dead"), - ProofLocalExitRoot: proofLocalH, - ProofRollupExitRoot: proofRollupH, - DestinationNetwork: 0, - Metadata: []byte{}, - GlobalExitRoot: 
crypto.Keccak256Hash(common.HexToHash("5ca1e").Bytes(), common.HexToHash("dead").Bytes()), - } - auth.GasLimit = 999999 // for some reason gas estimation fails :( - - abi, err := claimmock.ClaimmockMetaData.GetAbi() - require.NoError(t, err) - - // direct call claim asset - expectedClaim.GlobalIndex = big.NewInt(421) - expectedClaim.IsMessage = false - tx, err := bridgeContract.ClaimAsset( - auth, - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - 0, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - nil, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err := client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "direct call to claim asset", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // indirect call claim asset - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(422) - tx, err = claimCaller.ClaimAsset( - auth, - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - 0, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - nil, - false, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "indirect call to claim asset", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // indirect call claim asset bytes - expectedClaim.GlobalIndex = big.NewInt(423) - expectedClaim.IsMessage = false - expectedClaimBytes, err := abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - 
expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.ClaimBytes( - auth, - expectedClaimBytes, - false, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "indirect call to claim asset bytes", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // direct call claim message - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(424) - tx, err = bridgeContract.ClaimMessage( - auth, - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - 0, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - nil, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "direct call to claim message", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // indirect call claim message - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(425) - tx, err = claimCaller.ClaimMessage( - auth, - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - 0, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - nil, - false, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "indirect call to claim message", - 
bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // indirect call claim message bytes - expectedClaim.GlobalIndex = big.NewInt(426) - expectedClaim.IsMessage = true - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.ClaimBytes( - auth, - expectedClaimBytes, - false, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "indirect call to claim message bytes", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // indirect call claim message bytes - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim.IsMessage = true - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.ClaimBytes( - auth, - expectedClaimBytes, - true, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - log.Infof("%+v", r.Logs) - - reverted := [2]bool{false, false} - - // 2 indirect call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - 
expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err := abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - // 2 indirect call claim message (diff global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(428) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(429) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - 
expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - reverted = [2]bool{false, true} - - // 2 indirect call claim message (same global index) (1 ok, 1 reverted) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(430) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(430) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - 
expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message (same globalIndex) (1 ok, 1 reverted)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // 2 indirect call claim message (diff global index) (1 ok, 1 reverted) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(431) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(432) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message (diff globalIndex) (1 ok, 1 
reverted)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - reverted = [2]bool{true, false} - - // 2 indirect call claim message (same global index) (1 reverted, 1 ok) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(433) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(433) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message (same globalIndex) (reverted,ok)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - - // 2 indirect call claim message (diff global index) (1 reverted, 1 ok) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(434) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(435) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - 
expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim message (diff globalIndex) (reverted,ok)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - - reverted = [2]bool{false, false} - - // 2 indirect call claim asset (same global index) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(436) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(436) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - 
expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - // 2 indirect call claim asset (diff global index) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(437) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(438) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, 
tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - reverted = [2]bool{false, true} - - // 2 indirect call claim asset (same global index) (1 ok, 1 reverted) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(439) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(439) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset (same globalIndex) (1 ok, 1 reverted)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - // 2 indirect call claim message (diff global index) (1 ok, 1 
reverted) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(440) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(441) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset (diff globalIndex) (1 ok, 1 reverted)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - reverted = [2]bool{true, false} - - // 2 indirect call claim asset (same global index) (1 reverted, 1 ok) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(442) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(442) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - 
expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset (same globalIndex) (reverted,ok)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - - // 2 indirect call claim asset (diff global index) (1 reverted, 1 ok) - expectedClaim.IsMessage = false - expectedClaim.GlobalIndex = big.NewInt(443) - expectedClaim2.IsMessage = false - expectedClaim2.GlobalIndex = big.NewInt(444) - expectedClaimBytes, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimAsset", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimCaller.Claim2Bytes( - auth, - 
expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect call claim asset (diff globalIndex) (reverted,ok)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - - // indirect + indirect call claim message bytes - expectedClaim.GlobalIndex = big.NewInt(426) - expectedClaim.IsMessage = true - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.ClaimTestInternal( - auth, - expectedClaimBytes, - false, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "indirect + indirect call to claim message bytes", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - reverted = [2]bool{false, false} - - // 2 indirect + indirect call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - 
expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim2TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - reverted, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 indirect + indirect call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "2 indirect + indirect call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - reverted3 := [3]bool{false, false, false} - - // 3 ok (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - 
expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err := abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[2], - expectedClaim: expectedClaim3, - }) - - // 3 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(428) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(429) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - 
proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - testCases = append(testCases, testCase{ - description: "3 ok (indirectx2, indirect, indirectx2) call claim message 3 (diff globalIndex)", - bridgeAddr: 
bridgeAddr, - log: *r.Logs[2], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{true, false, false} - - // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(428) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(429) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, 
indirect, indirectx2) call claim message 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{false, true, false} - - // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (diff global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(428) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(429) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - 
auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{false, false, true} - - // 1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (diff global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(428) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(429) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - 
expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (diff globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim2, - }) - - reverted3 = [3]bool{true, false, false} - - // 1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - 
expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{false, true, false} - - // 1 ok 1 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - 
expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "1 ko 2 ok (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{false, false, true} - - // 1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - 
expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - testCases = append(testCases, testCase{ - description: "1 ok 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 2 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[1], - expectedClaim: 
expectedClaim2, - }) - - reverted3 = [3]bool{true, true, false} - - // 2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "2 ko 1 ok (indirectx2, indirect, indirectx2) call claim message 1 (same 
globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim3, - }) - - reverted3 = [3]bool{false, true, true} - - // 1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - testCases = append(testCases, testCase{ - description: 
"1 ok 2 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim, - }) - - reverted3 = [3]bool{true, false, true} - - // 1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message (same global index) - expectedClaim.IsMessage = true - expectedClaim.GlobalIndex = big.NewInt(427) - expectedClaim2.IsMessage = true - expectedClaim2.GlobalIndex = big.NewInt(427) - expectedClaim3.IsMessage = true - expectedClaim3.GlobalIndex = big.NewInt(427) - expectedClaimBytes, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim.GlobalIndex, - expectedClaim.MainnetExitRoot, - expectedClaim.RollupExitRoot, - expectedClaim.OriginNetwork, - expectedClaim.OriginAddress, - expectedClaim.DestinationNetwork, - expectedClaim.DestinationAddress, - expectedClaim.Amount, - expectedClaim.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes2, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim2.GlobalIndex, - expectedClaim2.MainnetExitRoot, - expectedClaim2.RollupExitRoot, - expectedClaim2.OriginNetwork, - expectedClaim2.OriginAddress, - expectedClaim2.DestinationNetwork, - expectedClaim2.DestinationAddress, - expectedClaim2.Amount, - expectedClaim2.Metadata, - ) - require.NoError(t, err) - expectedClaimBytes3, err = abi.Pack( - "claimMessage", - proofLocal, - proofRollup, - expectedClaim3.GlobalIndex, - expectedClaim3.MainnetExitRoot, - expectedClaim3.RollupExitRoot, - expectedClaim3.OriginNetwork, - expectedClaim3.OriginAddress, - expectedClaim3.DestinationNetwork, - expectedClaim3.DestinationAddress, - expectedClaim3.Amount, - expectedClaim3.Metadata, - ) - require.NoError(t, err) - tx, err = claimTest.Claim3TestInternal( - auth, - expectedClaimBytes, - expectedClaimBytes2, - expectedClaimBytes3, - reverted3, - ) - require.NoError(t, err) - time.Sleep(1 * time.Second) - r, err = client.TransactionReceipt(ctx, tx.Hash()) - 
require.NoError(t, err) - testCases = append(testCases, testCase{ - description: "1 ko 1 ok 1 ko (indirectx2, indirect, indirectx2) call claim message 1 (same globalIndex)", - bridgeAddr: bridgeAddr, - log: *r.Logs[0], - expectedClaim: expectedClaim2, - }) - - for _, tc := range testCases { - log.Info(tc.description) - t.Run(tc.description, func(t *testing.T) { - claimEvent, err := bridgeContract.ParseClaimEvent(tc.log) - require.NoError(t, err) - actualClaim := Claim{ - GlobalIndex: claimEvent.GlobalIndex, - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginAddress, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - } - err = setClaimCalldata(client, tc.bridgeAddr, tc.log.TxHash, &actualClaim) - require.NoError(t, err) - require.Equal(t, tc.expectedClaim, actualClaim) - }) - } -} diff --git a/bridgesync/config.go b/bridgesync/config.go deleted file mode 100644 index 66eb00ed3..000000000 --- a/bridgesync/config.go +++ /dev/null @@ -1,27 +0,0 @@ -package bridgesync - -import ( - "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/common" -) - -type Config struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // InitialBlockNum is the first block that will be queried when starting the synchronization from scratch. 
- // It should be a number equal or bellow the creation of the bridge contract - InitialBlockNum uint64 `mapstructure:"InitialBlockNum"` - // BridgeAddr is the address of the bridge smart contract - BridgeAddr common.Address `mapstructure:"BridgeAddr"` - // SyncBlockChunkSize is the amount of blocks that will be queried to the client on each request - SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. - // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block - WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` -} diff --git a/bridgesync/docker-compose.yml b/bridgesync/docker-compose.yml deleted file mode 100644 index 9a8f77698..000000000 --- a/bridgesync/docker-compose.yml +++ /dev/null @@ -1,24 +0,0 @@ -services: - test-claimdata-l1: - container_name: test-claimdata-l1 - image: hermeznetwork/geth-zkevm-contracts:elderberry-fork.9-geth1.13.11 - environment: - - DEV_PERIOD - ports: - - "8545:8545" - entrypoint: - - geth - - --http - - --http.addr - - "0.0.0.0" - - "--http.corsdomain" - - "*" - - "--http.vhosts" - - "*" - - --dev - - --dev.period - - "1" - - "--datadir" - - "/geth_data" - - "--http.api" - - "admin,eth,debug,miner,net,txpool,personal,web3" diff --git a/bridgesync/downloader.go b/bridgesync/downloader.go deleted file mode 100644 index 3756c9706..000000000 --- a/bridgesync/downloader.go +++ /dev/null @@ -1,301 +0,0 @@ -package bridgesync - -import ( - "bytes" - "fmt" - "math/big" - "strings" - - 
"github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonzkevmbridge" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmbridgev2" - rpcTypes "github.com/0xPolygon/cdk-rpc/types" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/sync" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rpc" - "github.com/golang-collections/collections/stack" -) - -var ( - bridgeEventSignature = crypto.Keccak256Hash([]byte( - "BridgeEvent(uint8,uint32,address,uint32,address,uint256,bytes,uint32)", - )) - claimEventSignature = crypto.Keccak256Hash([]byte("ClaimEvent(uint256,uint32,address,address,uint256)")) - claimEventSignaturePreEtrog = crypto.Keccak256Hash([]byte("ClaimEvent(uint32,uint32,address,address,uint256)")) - methodIDClaimAsset = common.Hex2Bytes("ccaa2d11") - methodIDClaimMessage = common.Hex2Bytes("f5efcd79") -) - -// EthClienter defines the methods required to interact with an Ethereum client. 
-type EthClienter interface { - ethereum.LogFilterer - ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend - Client() *rpc.Client -} - -func buildAppender(client EthClienter, bridge common.Address, syncFullClaims bool) (sync.LogAppenderMap, error) { - bridgeContractV1, err := polygonzkevmbridge.NewPolygonzkevmbridge(bridge, client) - if err != nil { - return nil, err - } - bridgeContractV2, err := polygonzkevmbridgev2.NewPolygonzkevmbridgev2(bridge, client) - if err != nil { - return nil, err - } - appender := make(sync.LogAppenderMap) - - appender[bridgeEventSignature] = func(b *sync.EVMBlock, l types.Log) error { - bridge, err := bridgeContractV2.ParseBridgeEvent(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV2.ParseBridgeEvent: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{Bridge: &Bridge{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - LeafType: bridge.LeafType, - OriginNetwork: bridge.OriginNetwork, - OriginAddress: bridge.OriginAddress, - DestinationNetwork: bridge.DestinationNetwork, - DestinationAddress: bridge.DestinationAddress, - Amount: bridge.Amount, - Metadata: bridge.Metadata, - DepositCount: bridge.DepositCount, - }}) - - return nil - } - - appender[claimEventSignature] = func(b *sync.EVMBlock, l types.Log) error { - claimEvent, err := bridgeContractV2.ParseClaimEvent(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV2.ParseClaimEvent: %w", - l, err, - ) - } - claim := &Claim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - GlobalIndex: claimEvent.GlobalIndex, - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginAddress, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - } - if syncFullClaims { - if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil { - return err - } - } - b.Events = append(b.Events, Event{Claim: claim}) - return nil - } - - 
appender[claimEventSignaturePreEtrog] = func(b *sync.EVMBlock, l types.Log) error { - claimEvent, err := bridgeContractV1.ParseClaimEvent(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using d.bridgeContractV1.ParseClaimEvent: %w", - l, err, - ) - } - claim := &Claim{ - BlockNum: b.Num, - BlockPos: uint64(l.Index), - GlobalIndex: big.NewInt(int64(claimEvent.Index)), - OriginNetwork: claimEvent.OriginNetwork, - OriginAddress: claimEvent.OriginAddress, - DestinationAddress: claimEvent.DestinationAddress, - Amount: claimEvent.Amount, - } - if syncFullClaims { - if err := setClaimCalldata(client, bridge, l.TxHash, claim); err != nil { - return err - } - } - b.Events = append(b.Events, Event{Claim: claim}) - return nil - } - - return appender, nil -} - -type call struct { - To common.Address `json:"to"` - Value *rpcTypes.ArgBig `json:"value"` - Err *string `json:"error"` - Input rpcTypes.ArgBytes `json:"input"` - Calls []call `json:"calls"` -} - -type tracerCfg struct { - Tracer string `json:"tracer"` -} - -func setClaimCalldata(client EthClienter, bridge common.Address, txHash common.Hash, claim *Claim) error { - c := &call{} - err := client.Client().Call(c, "debug_traceTransaction", txHash, tracerCfg{Tracer: "callTracer"}) - if err != nil { - return err - } - - // find the claim linked to the event using DFS - callStack := stack.New() - callStack.Push(*c) - for { - if callStack.Len() == 0 { - break - } - - currentCallInterface := callStack.Pop() - currentCall, ok := currentCallInterface.(call) - if !ok { - return fmt.Errorf("unexpected type for 'currentCall'. 
Expected 'call', got '%T'", currentCallInterface) - } - - if currentCall.To == bridge { - found, err := setClaimIfFoundOnInput( - currentCall.Input, - claim, - ) - if err != nil { - return err - } - if found { - return nil - } - } - for _, c := range currentCall.Calls { - callStack.Push(c) - } - } - return db.ErrNotFound -} - -func setClaimIfFoundOnInput(input []byte, claim *Claim) (bool, error) { - smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmbridgev2.Polygonzkevmbridgev2ABI)) - if err != nil { - return false, err - } - methodID := input[:4] - // Recover Method from signature and ABI - method, err := smcAbi.MethodById(methodID) - if err != nil { - return false, err - } - data, err := method.Inputs.Unpack(input[4:]) - if err != nil { - return false, err - } - // Ignore other methods - if bytes.Equal(methodID, methodIDClaimAsset) || bytes.Equal(methodID, methodIDClaimMessage) { - found, err := decodeClaimCallDataAndSetIfFound(data, claim) - if err != nil { - return false, err - } - if found { - if bytes.Equal(methodID, methodIDClaimMessage) { - claim.IsMessage = true - } - return true, nil - } - return false, nil - } else { - return false, nil - } - // TODO: support both claim asset & message, check if previous versions need special treatment -} - -func decodeClaimCallDataAndSetIfFound(data []interface{}, claim *Claim) (bool, error) { - /* Unpack method inputs. 
Note that both claimAsset and claimMessage have the same interface - for the relevant parts - claimAsset( - 0: smtProofLocalExitRoot, - 1: smtProofRollupExitRoot, - 2: globalIndex, - 3: mainnetExitRoot, - 4: rollupExitRoot, - 5: originNetwork, - 6: originTokenAddress, - 7: destinationNetwork, - 8: destinationAddress, - 9: amount, - 10: metadata, - ) - claimMessage( - 0: smtProofLocalExitRoot, - 1: smtProofRollupExitRoot, - 2: globalIndex, - 3: mainnetExitRoot, - 4: rollupExitRoot, - 5: originNetwork, - 6: originAddress, - 7: destinationNetwork, - 8: destinationAddress, - 9: amount, - 10: metadata, - ) - */ - actualGlobalIndex, ok := data[2].(*big.Int) - if !ok { - return false, fmt.Errorf("unexpected type for actualGlobalIndex, expected *big.Int got '%T'", data[2]) - } - if actualGlobalIndex.Cmp(claim.GlobalIndex) != 0 { - // not the claim we're looking for - return false, nil - } else { - proofLER := [tree.DefaultHeight]common.Hash{} - proofLERBytes, ok := data[0].([32][32]byte) - if !ok { - return false, fmt.Errorf("unexpected type for proofLERBytes, expected [32][32]byte got '%T'", data[0]) - } - - proofRER := [tree.DefaultHeight]common.Hash{} - proofRERBytes, ok := data[1].([32][32]byte) - if !ok { - return false, fmt.Errorf("unexpected type for proofRERBytes, expected [32][32]byte got '%T'", data[1]) - } - - for i := 0; i < int(tree.DefaultHeight); i++ { - proofLER[i] = proofLERBytes[i] - proofRER[i] = proofRERBytes[i] - } - claim.ProofLocalExitRoot = proofLER - claim.ProofRollupExitRoot = proofRER - - claim.MainnetExitRoot, ok = data[3].([32]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'MainnetExitRoot'. Expected '[32]byte', got '%T'", data[3]) - } - - claim.RollupExitRoot, ok = data[4].([32]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'RollupExitRoot'. 
Expected '[32]byte', got '%T'", data[4]) - } - - claim.DestinationNetwork, ok = data[7].(uint32) - if !ok { - return false, fmt.Errorf("unexpected type for 'DestinationNetwork'. Expected 'uint32', got '%T'", data[7]) - } - - claim.Metadata, ok = data[10].([]byte) - if !ok { - return false, fmt.Errorf("unexpected type for 'claim Metadata'. Expected '[]byte', got '%T'", data[10]) - } - - claim.GlobalExitRoot = crypto.Keccak256Hash(claim.MainnetExitRoot.Bytes(), claim.RollupExitRoot.Bytes()) - - return true, nil - } -} diff --git a/bridgesync/e2e_test.go b/bridgesync/e2e_test.go deleted file mode 100644 index 06a705f2d..000000000 --- a/bridgesync/e2e_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package bridgesync_test - -import ( - "context" - "math/big" - "testing" - "time" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/require" -) - -func TestBridgeEventE2E(t *testing.T) { - const ( - blockTime = time.Millisecond * 10 - totalBridges = 80 - totalReorgs = 40 - maxReorgDepth = 2 - reorgEveryXIterations = 4 // every X blocks go back [1,maxReorgDepth] blocks - ) - setup := helpers.NewE2EEnvWithEVML2(t) - ctx := context.Background() - // Send bridge txs - bridgesSent := 0 - reorgs := 0 - expectedBridges := []bridgesync.Bridge{} - lastDepositCount := uint32(0) - for i := 1; i > 0; i++ { - // Send bridge - bridge := bridgesync.Bridge{ - Amount: big.NewInt(0), - DepositCount: lastDepositCount, - DestinationNetwork: uint32(i + 1), - DestinationAddress: common.HexToAddress("f00"), - Metadata: []byte{}, - } - lastDepositCount++ - tx, err := setup.L1Environment.BridgeContract.BridgeAsset( - setup.L1Environment.Auth, - bridge.DestinationNetwork, - bridge.DestinationAddress, - bridge.Amount, - bridge.OriginAddress, - true, nil, - ) - require.NoError(t, err) - helpers.CommitBlocks(t, 
setup.L1Environment.SimBackend, 1, blockTime) - bn, err := setup.L1Environment.SimBackend.Client().BlockNumber(ctx) - require.NoError(t, err) - bridge.BlockNum = bn - receipt, err := setup.L1Environment.SimBackend.Client().TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) - expectedBridges = append(expectedBridges, bridge) - bridgesSent++ - - // Trigger reorg - if i%reorgEveryXIterations == 0 { - blocksToReorg := 1 + i%maxReorgDepth - bn, err := setup.L1Environment.SimBackend.Client().BlockNumber(ctx) - require.NoError(t, err) - helpers.Reorg(t, setup.L1Environment.SimBackend, uint64(blocksToReorg)) - // Clean expected bridges - lastValidBlock := bn - uint64(blocksToReorg) - reorgEffective := false - for i := len(expectedBridges) - 1; i >= 0; i-- { - if expectedBridges[i].BlockNum > lastValidBlock { - log.Debugf("removing expectedBridge with depositCount %d due to reorg", expectedBridges[i].DepositCount) - lastDepositCount = expectedBridges[i].DepositCount - expectedBridges = expectedBridges[0:i] - reorgEffective = true - bridgesSent-- - } - } - if reorgEffective { - reorgs++ - log.Debug("reorgs: ", reorgs) - } - } - - // Finish condition - if bridgesSent >= totalBridges && reorgs >= totalReorgs { - break - } - } - - helpers.CommitBlocks(t, setup.L1Environment.SimBackend, 11, blockTime) - - // Wait for syncer to catch up - time.Sleep(time.Second * 2) // sleeping since the processor could be up to date, but have pending reorgs - lb, err := setup.L1Environment.SimBackend.Client().BlockNumber(ctx) - require.NoError(t, err) - helpers.RequireProcessorUpdated(t, setup.L1Environment.BridgeSync, lb) - - // Get bridges - lastBlock, err := setup.L1Environment.SimBackend.Client().BlockNumber(ctx) - require.NoError(t, err) - actualBridges, err := setup.L1Environment.BridgeSync.GetBridges(ctx, 0, lastBlock) - require.NoError(t, err) - - // Assert bridges - expectedRoot, err := 
setup.L1Environment.BridgeContract.GetRoot(nil) - require.NoError(t, err) - root, err := setup.L1Environment.BridgeSync.GetExitRootByIndex(ctx, expectedBridges[len(expectedBridges)-1].DepositCount) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRoot).Hex(), root.Hash.Hex()) - require.Equal(t, expectedBridges, actualBridges) -} diff --git a/bridgesync/migrations/bridgesync0001.sql b/bridgesync/migrations/bridgesync0001.sql deleted file mode 100644 index 74adc6d50..000000000 --- a/bridgesync/migrations/bridgesync0001.sql +++ /dev/null @@ -1,42 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS block; -DROP TABLE IF EXISTS claim; -DROP TABLE IF EXISTS bridge; - --- +migrate Up -CREATE TABLE block ( - num BIGINT PRIMARY KEY -); - -CREATE TABLE bridge ( - block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - leaf_type INTEGER NOT NULL, - origin_network INTEGER NOT NULL, - origin_address VARCHAR NOT NULL, - destination_network INTEGER NOT NULL, - destination_address VARCHAR NOT NULL, - amount TEXT NOT NULL, - metadata BLOB, - deposit_count INTEGER NOT NULL, - PRIMARY KEY (block_num, block_pos) -); - -CREATE TABLE claim ( - block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - global_index TEXT NOT NULL, - origin_network INTEGER NOT NULL, - origin_address VARCHAR NOT NULL, - destination_address VARCHAR NOT NULL, - amount TEXT NOT NULL, - proof_local_exit_root VARCHAR, - proof_rollup_exit_root VARCHAR, - mainnet_exit_root VARCHAR, - rollup_exit_root VARCHAR, - global_exit_root VARCHAR, - destination_network INTEGER NOT NULL, - metadata BLOB, - is_message BOOLEAN, - PRIMARY KEY (block_num, block_pos) -); \ No newline at end of file diff --git a/bridgesync/migrations/bridgesync0001_test.go b/bridgesync/migrations/bridgesync0001_test.go deleted file mode 100644 index 51e5aded5..000000000 --- a/bridgesync/migrations/bridgesync0001_test.go +++ /dev/null @@ -1,61 +0,0 @@ 
-package migrations - -import ( - "context" - "path" - "testing" - - "github.com/0xPolygon/cdk/db" - "github.com/stretchr/testify/require" -) - -func Test001(t *testing.T) { - dbPath := path.Join(t.TempDir(), "bridgesyncTest001.sqlite") - - err := RunMigrations(dbPath) - require.NoError(t, err) - db, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - - ctx := context.Background() - tx, err := db.BeginTx(ctx, nil) - require.NoError(t, err) - - _, err = tx.Exec(` - INSERT INTO block (num) VALUES (1); - - INSERT INTO bridge ( - block_num, - block_pos, - leaf_type, - origin_network, - origin_address, - destination_network, - destination_address, - amount, - metadata, - deposit_count - ) VALUES (1, 0, 0, 0, '0x0000', 0, '0x0000', 0, NULL, 0); - - INSERT INTO claim ( - block_num, - block_pos, - global_index, - origin_network, - origin_address, - destination_address, - amount, - proof_local_exit_root, - proof_rollup_exit_root, - mainnet_exit_root, - rollup_exit_root, - global_exit_root, - destination_network, - metadata, - is_message - ) VALUES (1, 0, 0, 0, '0x0000', '0x0000', 0, '0x000,0x000', '0x000,0x000', '0x000', '0x000', '0x0', 0, NULL, FALSE); - `) - require.NoError(t, err) - err = tx.Commit() - require.NoError(t, err) -} diff --git a/bridgesync/migrations/migrations.go b/bridgesync/migrations/migrations.go deleted file mode 100644 index c500ee386..000000000 --- a/bridgesync/migrations/migrations.go +++ /dev/null @@ -1,23 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" - treeMigrations "github.com/0xPolygon/cdk/tree/migrations" -) - -//go:embed bridgesync0001.sql -var mig001 string - -func RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "bridgesync0001", - SQL: mig001, - }, - } - migrations = append(migrations, treeMigrations.Migrations...) 
- return db.RunMigrations(dbPath, migrations) -} diff --git a/bridgesync/mock_l2_test.go b/bridgesync/mock_l2_test.go deleted file mode 100644 index ef842d189..000000000 --- a/bridgesync/mock_l2_test.go +++ /dev/null @@ -1,577 +0,0 @@ -// Code generated by mockery v2.45.0. DO NOT EDIT. - -package bridgesync - -import ( - context "context" - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - ethereum "github.com/ethereum/go-ethereum" - - mock "github.com/stretchr/testify/mock" - - rpc "github.com/ethereum/go-ethereum/rpc" - - types "github.com/ethereum/go-ethereum/core/types" -) - -// L2Mock is an autogenerated mock type for the EthClienter type -type L2Mock struct { - mock.Mock -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *L2Mock) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHash") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByNumber provides a mock function with given fields: ctx, number -func (_m *L2Mock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumber") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { - 
r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *L2Mock) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CallContract provides a mock function with given fields: ctx, call, blockNumber -func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, call, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { - return rf(ctx, call, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { - r0 = rf(ctx, call, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { - r1 = rf(ctx, call, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Client provides a mock function with given fields: -func (_m *L2Mock) Client() *rpc.Client { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Client") - } - - var r0 *rpc.Client - if rf, ok := 
ret.Get(0).(func() *rpc.Client); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.Client) - } - } - - return r0 -} - -// CodeAt provides a mock function with given fields: ctx, contract, blockNumber -func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, contract, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { - return rf(ctx, contract, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { - r0 = rf(ctx, contract, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, contract, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EstimateGas provides a mock function with given fields: ctx, call -func (_m *L2Mock) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - ret := _m.Called(ctx, call) - - if len(ret) == 0 { - panic("no return value specified for EstimateGas") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { - return rf(ctx, call) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { - r0 = rf(ctx, call) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { - r1 = rf(ctx, call) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FilterLogs provides a mock function with given fields: ctx, q -func (_m *L2Mock) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - ret := _m.Called(ctx, q) - - if len(ret) == 0 { - panic("no return value specified 
for FilterLogs") - } - - var r0 []types.Log - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { - return rf(ctx, q) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { - r0 = rf(ctx, q) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Log) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { - r1 = rf(ctx, q) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HeaderByHash provides a mock function with given fields: ctx, hash -func (_m *L2Mock) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for HeaderByHash") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *L2Mock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); 
ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *L2Mock) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingCodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { - r0 = rf(ctx, account) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingNonceAt provides a mock function with given fields: ctx, account -func (_m *L2Mock) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingNonceAt") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { - r0 = rf(ctx, account) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SendTransaction provides a mock function with given fields: ctx, tx -func (_m *L2Mock) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { - r0 = rf(ctx, tx) - } 
else { - r0 = ret.Error(0) - } - - return r0 -} - -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *L2Mock) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { - r1 = rf(ctx, q, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *L2Mock) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeNewHead") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { - r0 = rf(ctx, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { - r1 = rf(ctx, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SuggestGasPrice provides a mock function with given fields: ctx -func (_m *L2Mock) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - ret 
:= _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasPrice") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *L2Mock) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasTipCap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TransactionCount provides a mock function with given fields: ctx, blockHash -func (_m *L2Mock) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { - ret := _m.Called(ctx, blockHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionCount") - } - - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { - return rf(ctx, blockHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { - r0 = rf(ctx, blockHash) - } else { - r0 = ret.Get(0).(uint) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, blockHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TransactionInBlock provides a mock function with given 
fields: ctx, blockHash, index -func (_m *L2Mock) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { - ret := _m.Called(ctx, blockHash, index) - - if len(ret) == 0 { - panic("no return value specified for TransactionInBlock") - } - - var r0 *types.Transaction - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { - return rf(ctx, blockHash, index) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { - r0 = rf(ctx, blockHash, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Transaction) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { - r1 = rf(ctx, blockHash, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewL2Mock creates a new instance of L2Mock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL2Mock(t interface { - mock.TestingT - Cleanup(func()) -}) *L2Mock { - mock := &L2Mock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/bridgesync/mocks/eth_clienter.go b/bridgesync/mocks/eth_clienter.go deleted file mode 100644 index 12a99a988..000000000 --- a/bridgesync/mocks/eth_clienter.go +++ /dev/null @@ -1,1136 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks_bridgesync - -import ( - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - context "context" - - ethereum "github.com/ethereum/go-ethereum" - - mock "github.com/stretchr/testify/mock" - - rpc "github.com/ethereum/go-ethereum/rpc" - - types "github.com/ethereum/go-ethereum/core/types" -) - -// EthClienter is an autogenerated mock type for the EthClienter type -type EthClienter struct { - mock.Mock -} - -type EthClienter_Expecter struct { - mock *mock.Mock -} - -func (_m *EthClienter) EXPECT() *EthClienter_Expecter { - return &EthClienter_Expecter{mock: &_m.Mock} -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHash") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' -type EthClienter_BlockByHash_Call struct { - *mock.Call -} - -// BlockByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { - return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} -} - -func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) 
*EthClienter_BlockByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { - _c.Call.Return(run) - return _c -} - -// BlockByNumber provides a mock function with given fields: ctx, number -func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumber") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' -type EthClienter_BlockByNumber_Call struct { - *mock.Call -} - -// BlockByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { - return &EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} -} - -func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), 
args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { - _c.Call.Return(run) - return _c -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type EthClienter_BlockNumber_Call struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { - return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} -} - -func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { - _c.Call.Return(run) - return _c -} - -// 
CallContract provides a mock function with given fields: ctx, call, blockNumber -func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, call, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { - return rf(ctx, call, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { - r0 = rf(ctx, call, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { - r1 = rf(ctx, call, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' -type EthClienter_CallContract_Call struct { - *mock.Call -} - -// CallContract is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -// - blockNumber *big.Int -func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { - return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} -} - -func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, 
ethereum.CallMsg, *big.Int) ([]byte, error)) *EthClienter_CallContract_Call { - _c.Call.Return(run) - return _c -} - -// Client provides a mock function with no fields -func (_m *EthClienter) Client() *rpc.Client { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Client") - } - - var r0 *rpc.Client - if rf, ok := ret.Get(0).(func() *rpc.Client); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*rpc.Client) - } - } - - return r0 -} - -// EthClienter_Client_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Client' -type EthClienter_Client_Call struct { - *mock.Call -} - -// Client is a helper method to define mock.On call -func (_e *EthClienter_Expecter) Client() *EthClienter_Client_Call { - return &EthClienter_Client_Call{Call: _e.mock.On("Client")} -} - -func (_c *EthClienter_Client_Call) Run(run func()) *EthClienter_Client_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *EthClienter_Client_Call) Return(_a0 *rpc.Client) *EthClienter_Client_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EthClienter_Client_Call) RunAndReturn(run func() *rpc.Client) *EthClienter_Client_Call { - _c.Call.Return(run) - return _c -} - -// CodeAt provides a mock function with given fields: ctx, contract, blockNumber -func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, contract, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { - return rf(ctx, contract, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { - r0 = rf(ctx, contract, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := 
ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, contract, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' -type EthClienter_CodeAt_Call struct { - *mock.Call -} - -// CodeAt is a helper method to define mock.On call -// - ctx context.Context -// - contract common.Address -// - blockNumber *big.Int -func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { - return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} -} - -func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { - _c.Call.Return(run) - return _c -} - -// EstimateGas provides a mock function with given fields: ctx, call -func (_m *EthClienter) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - ret := _m.Called(ctx, call) - - if len(ret) == 0 { - panic("no return value specified for EstimateGas") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { - return rf(ctx, call) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { - r0 = rf(ctx, call) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { - r1 = rf(ctx, call) - } 
else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' -type EthClienter_EstimateGas_Call struct { - *mock.Call -} - -// EstimateGas is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { - return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} -} - -func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg)) - }) - return _c -} - -func (_c *EthClienter_EstimateGas_Call) Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { - _c.Call.Return(run) - return _c -} - -// FilterLogs provides a mock function with given fields: ctx, q -func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - ret := _m.Called(ctx, q) - - if len(ret) == 0 { - panic("no return value specified for FilterLogs") - } - - var r0 []types.Log - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { - return rf(ctx, q) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { - r0 = rf(ctx, q) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Log) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { - r1 = rf(ctx, q) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_FilterLogs_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'FilterLogs' -type EthClienter_FilterLogs_Call struct { - *mock.Call -} - -// FilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -func (_e *EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { - return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} -} - -func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) - }) - return _c -} - -func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *EthClienter_FilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByHash provides a mock function with given fields: ctx, hash -func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for HeaderByHash") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' -type EthClienter_HeaderByHash_Call struct { - 
*mock.Call -} - -// HeaderByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { - return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} -} - -func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClienter_HeaderByHash_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClienter_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context 
-// - number *big.Int -func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { - return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingCodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { - r0 = rf(ctx, account) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' -type EthClienter_PendingCodeAt_Call struct { - *mock.Call -} - -// PendingCodeAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, 
account interface{}) *EthClienter_PendingCodeAt_Call { - return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} -} - -func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { - _c.Call.Return(run) - return _c -} - -// PendingNonceAt provides a mock function with given fields: ctx, account -func (_m *EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingNonceAt") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { - r0 = rf(ctx, account) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' -type EthClienter_PendingNonceAt_Call struct { - *mock.Call -} - -// PendingNonceAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { - return &EthClienter_PendingNonceAt_Call{Call: 
_e.mock.On("PendingNonceAt", ctx, account)} -} - -func (_c *EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { - _c.Call.Return(run) - return _c -} - -// SendTransaction provides a mock function with given fields: ctx, tx -func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { - r0 = rf(ctx, tx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' -type EthClienter_SendTransaction_Call struct { - *mock.Call -} - -// SendTransaction is a helper method to define mock.On call -// - ctx context.Context -// - tx *types.Transaction -func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { - return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} -} - -func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*types.Transaction)) - }) - return _c -} - -func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call 
{ - _c.Call.Return(_a0) - return _c -} - -func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { - r1 = rf(ctx, q, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' -type EthClienter_SubscribeFilterLogs_Call struct { - *mock.Call -} - -// SubscribeFilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -// - ch chan<- types.Log -func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { - return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Run(func(args 
mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) - }) - return _c -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeNewHead") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { - r0 = rf(ctx, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { - r1 = rf(ctx, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' -type EthClienter_SubscribeNewHead_Call struct { - *mock.Call -} - -// SubscribeNewHead is a helper method to define mock.On call -// - ctx context.Context -// - ch chan<- *types.Header -func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { - return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} -} - -func (_c *EthClienter_SubscribeNewHead_Call) 
Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(chan<- *types.Header)) - }) - return _c -} - -func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasPrice provides a mock function with given fields: ctx -func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasPrice") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' -type EthClienter_SuggestGasPrice_Call struct { - *mock.Call -} - -// SuggestGasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { - return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} -} - -func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c 
*EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasTipCap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' -type EthClienter_SuggestGasTipCap_Call struct { - *mock.Call -} - -// SuggestGasTipCap is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { - return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} -} - -func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) 
*EthClienter_SuggestGasTipCap_Call { - _c.Call.Return(run) - return _c -} - -// TransactionCount provides a mock function with given fields: ctx, blockHash -func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { - ret := _m.Called(ctx, blockHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionCount") - } - - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { - return rf(ctx, blockHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { - r0 = rf(ctx, blockHash) - } else { - r0 = ret.Get(0).(uint) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, blockHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' -type EthClienter_TransactionCount_Call struct { - *mock.Call -} - -// TransactionCount is a helper method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -func (_e *EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { - return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} -} - -func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { - _c.Call.Return(run) - return _c -} - -// 
TransactionInBlock provides a mock function with given fields: ctx, blockHash, index -func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { - ret := _m.Called(ctx, blockHash, index) - - if len(ret) == 0 { - panic("no return value specified for TransactionInBlock") - } - - var r0 *types.Transaction - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { - return rf(ctx, blockHash, index) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { - r0 = rf(ctx, blockHash, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Transaction) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { - r1 = rf(ctx, blockHash, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' -type EthClienter_TransactionInBlock_Call struct { - *mock.Call -} - -// TransactionInBlock is a helper method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -// - index uint -func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { - return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} -} - -func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) - }) - return _c -} - -func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - 
-func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { - _c.Call.Return(run) - return _c -} - -// NewEthClienter creates a new instance of EthClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthClienter(t interface { - mock.TestingT - Cleanup(func()) -}) *EthClienter { - mock := &EthClienter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/bridgesync/mocks/reorg_detector.go b/bridgesync/mocks/reorg_detector.go deleted file mode 100644 index d24f4b834..000000000 --- a/bridgesync/mocks/reorg_detector.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks_bridgesync - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - mock "github.com/stretchr/testify/mock" - - reorgdetector "github.com/0xPolygon/cdk/reorgdetector" -) - -// ReorgDetector is an autogenerated mock type for the ReorgDetector type -type ReorgDetector struct { - mock.Mock -} - -type ReorgDetector_Expecter struct { - mock *mock.Mock -} - -func (_m *ReorgDetector) EXPECT() *ReorgDetector_Expecter { - return &ReorgDetector_Expecter{mock: &_m.Mock} -} - -// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash -func (_m *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { - ret := _m.Called(ctx, id, blockNum, blockHash) - - if len(ret) == 0 { - panic("no return value specified for AddBlockToTrack") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { - r0 = rf(ctx, id, blockNum, blockHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ReorgDetector_AddBlockToTrack_Call 
is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBlockToTrack' -type ReorgDetector_AddBlockToTrack_Call struct { - *mock.Call -} - -// AddBlockToTrack is a helper method to define mock.On call -// - ctx context.Context -// - id string -// - blockNum uint64 -// - blockHash common.Hash -func (_e *ReorgDetector_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetector_AddBlockToTrack_Call { - return &ReorgDetector_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} -} - -func (_c *ReorgDetector_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetector_AddBlockToTrack_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) - }) - return _c -} - -func (_c *ReorgDetector_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetector_AddBlockToTrack_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ReorgDetector_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, common.Hash) error) *ReorgDetector_AddBlockToTrack_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *ReorgDetector) Subscribe(id string) (*reorgdetector.Subscription, error) { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 *reorgdetector.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { - return rf(id) - } - if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*reorgdetector.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
ReorgDetector_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type ReorgDetector_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *ReorgDetector_Expecter) Subscribe(id interface{}) *ReorgDetector_Subscribe_Call { - return &ReorgDetector_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *ReorgDetector_Subscribe_Call) Run(run func(id string)) *ReorgDetector_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *ReorgDetector_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetector_Subscribe_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ReorgDetector_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetector_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewReorgDetector creates a new instance of ReorgDetector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewReorgDetector(t interface { - mock.TestingT - Cleanup(func()) -}) *ReorgDetector { - mock := &ReorgDetector{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/bridgesync/processor.go b/bridgesync/processor.go deleted file mode 100644 index b74976ccb..000000000 --- a/bridgesync/processor.go +++ /dev/null @@ -1,399 +0,0 @@ -package bridgesync - -import ( - "context" - "database/sql" - "encoding/binary" - "errors" - "fmt" - "math/big" - mutex "sync" - - "github.com/0xPolygon/cdk/bridgesync/migrations" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/iden3/go-iden3-crypto/keccak256" - "github.com/russross/meddler" - _ "modernc.org/sqlite" -) - -const ( - globalIndexPartSize = 4 - globalIndexMaxSize = 9 -) - -var ( - // errBlockNotProcessedFormat indicates that the given block(s) have not been processed yet. 
- errBlockNotProcessedFormat = fmt.Sprintf("block %%d not processed, last processed: %%d") -) - -// Bridge is the representation of a bridge event -type Bridge struct { - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - LeafType uint8 `meddler:"leaf_type"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` - DestinationNetwork uint32 `meddler:"destination_network"` - DestinationAddress common.Address `meddler:"destination_address"` - Amount *big.Int `meddler:"amount,bigint"` - Metadata []byte `meddler:"metadata"` - DepositCount uint32 `meddler:"deposit_count"` -} - -// Hash returns the hash of the bridge event as expected by the exit tree -func (b *Bridge) Hash() common.Hash { - const ( - uint32ByteSize = 4 - bigIntSize = 32 - ) - origNet := make([]byte, uint32ByteSize) - binary.BigEndian.PutUint32(origNet, b.OriginNetwork) - destNet := make([]byte, uint32ByteSize) - binary.BigEndian.PutUint32(destNet, b.DestinationNetwork) - - metaHash := keccak256.Hash(b.Metadata) - var buf [bigIntSize]byte - if b.Amount == nil { - b.Amount = big.NewInt(0) - } - - return common.BytesToHash(keccak256.Hash( - []byte{b.LeafType}, - origNet, - b.OriginAddress[:], - destNet, - b.DestinationAddress[:], - b.Amount.FillBytes(buf[:]), - metaHash, - )) -} - -// Claim representation of a claim event -type Claim struct { - BlockNum uint64 `meddler:"block_num"` - BlockPos uint64 `meddler:"block_pos"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginAddress common.Address `meddler:"origin_address"` - DestinationAddress common.Address `meddler:"destination_address"` - Amount *big.Int `meddler:"amount,bigint"` - ProofLocalExitRoot types.Proof `meddler:"proof_local_exit_root,merkleproof"` - ProofRollupExitRoot types.Proof `meddler:"proof_rollup_exit_root,merkleproof"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - 
RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` - DestinationNetwork uint32 `meddler:"destination_network"` - Metadata []byte `meddler:"metadata"` - IsMessage bool `meddler:"is_message"` -} - -// Event combination of bridge and claim events -type Event struct { - Pos uint64 - Bridge *Bridge - Claim *Claim -} - -type processor struct { - db *sql.DB - exitTree *tree.AppendOnlyTree - log *log.Logger - mu mutex.RWMutex - halted bool - haltedReason string -} - -func newProcessor(dbPath string, logger *log.Logger) (*processor, error) { - err := migrations.RunMigrations(dbPath) - if err != nil { - return nil, err - } - db, err := db.NewSQLiteDB(dbPath) - if err != nil { - return nil, err - } - - exitTree := tree.NewAppendOnlyTree(db, "") - return &processor{ - db: db, - exitTree: exitTree, - log: logger, - }, nil -} -func (p *processor) GetBridgesPublished( - ctx context.Context, fromBlock, toBlock uint64, -) ([]Bridge, error) { - return p.GetBridges(ctx, fromBlock, toBlock) -} - -func (p *processor) GetBridges( - ctx context.Context, fromBlock, toBlock uint64, -) ([]Bridge, error) { - tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) - if err != nil { - return nil, err - } - defer func() { - if err := tx.Rollback(); err != nil { - log.Warnf("error rolling back tx: %v", err) - } - }() - rows, err := p.queryBlockRange(tx, fromBlock, toBlock, "bridge") - if err != nil { - return nil, err - } - bridgePtrs := []*Bridge{} - if err = meddler.ScanAll(rows, &bridgePtrs); err != nil { - return nil, err - } - bridgesIface := db.SlicePtrsToSlice(bridgePtrs) - bridges, ok := bridgesIface.([]Bridge) - if !ok { - return nil, errors.New("failed to convert from []*Bridge to []Bridge") - } - return bridges, nil -} - -func (p *processor) GetClaims( - ctx context.Context, fromBlock, toBlock uint64, -) ([]Claim, error) { - tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) - if err != nil { - 
return nil, err - } - defer func() { - if err := tx.Rollback(); err != nil { - log.Warnf("error rolling back tx: %v", err) - } - }() - rows, err := p.queryBlockRange(tx, fromBlock, toBlock, "claim") - if err != nil { - return nil, err - } - claimPtrs := []*Claim{} - if err = meddler.ScanAll(rows, &claimPtrs); err != nil { - return nil, err - } - claimsIface := db.SlicePtrsToSlice(claimPtrs) - claims, ok := claimsIface.([]Claim) - if !ok { - return nil, errors.New("failed to convert from []*Claim to []Claim") - } - return claims, nil -} - -func (p *processor) queryBlockRange(tx db.Querier, fromBlock, toBlock uint64, table string) (*sql.Rows, error) { - if err := p.isBlockProcessed(tx, toBlock); err != nil { - return nil, err - } - rows, err := tx.Query(fmt.Sprintf(` - SELECT * FROM %s - WHERE block_num >= $1 AND block_num <= $2; - `, table), fromBlock, toBlock) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, db.ErrNotFound - } - return nil, err - } - return rows, nil -} - -func (p *processor) isBlockProcessed(tx db.Querier, blockNum uint64) error { - lpb, err := p.getLastProcessedBlockWithTx(tx) - if err != nil { - return err - } - if lpb < blockNum { - return fmt.Errorf(errBlockNotProcessedFormat, blockNum, lpb) - } - return nil -} - -// GetLastProcessedBlock returns the last processed block by the processor, including blocks -// that don't have events -func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - return p.getLastProcessedBlockWithTx(p.db) -} - -func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) { - var lastProcessedBlock uint64 - row := tx.QueryRow("SELECT num FROM block ORDER BY num DESC LIMIT 1;") - err := row.Scan(&lastProcessedBlock) - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - return lastProcessedBlock, err -} - -// Reorg triggers a purge and reset process on the processor to leaf it on a state -// as if the last block processed was firstReorgedBlock-1 -func 
(p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - tx, err := db.NewTx(ctx, p.db) - if err != nil { - return err - } - defer func() { - if err != nil { - if errRllbck := tx.Rollback(); errRllbck != nil { - log.Errorf("error while rolling back tx %v", errRllbck) - } - } - }() - - res, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) - if err != nil { - return err - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return err - } - - if err = p.exitTree.Reorg(tx, firstReorgedBlock); err != nil { - return err - } - if err := tx.Commit(); err != nil { - return err - } - sync.UnhaltIfAffectedRows(&p.halted, &p.haltedReason, &p.mu, rowsAffected) - return nil -} - -// ProcessBlock process the events of the block to build the exit tree -// and updates the last processed block (can be called without events for that purpose) -func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - if p.isHalted() { - log.Errorf("processor is halted due to: %s", p.haltedReason) - return sync.ErrInconsistentState - } - tx, err := db.NewTx(ctx, p.db) - if err != nil { - return err - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - log.Errorf("error while rolling back tx %v", errRllbck) - } - } - }() - - if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { - return err - } - for _, e := range block.Events { - event, ok := e.(Event) - if !ok { - return errors.New("failed to convert sync.Block.Event to Event") - } - if event.Bridge != nil { - if err = p.exitTree.AddLeaf(tx, block.Num, event.Pos, types.Leaf{ - Index: event.Bridge.DepositCount, - Hash: event.Bridge.Hash(), - }); err != nil { - if errors.Is(err, tree.ErrInvalidIndex) { - p.mu.Lock() - p.halted = true - p.haltedReason = fmt.Sprintf("error adding leaf to the exit tree: %v", err) - p.mu.Unlock() - } - return sync.ErrInconsistentState - } - if err 
= meddler.Insert(tx, "bridge", event.Bridge); err != nil { - return err - } - } - if event.Claim != nil { - if err = meddler.Insert(tx, "claim", event.Claim); err != nil { - return err - } - } - } - - if err := tx.Commit(); err != nil { - return err - } - shouldRollback = false - - p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) - return nil -} - -func GenerateGlobalIndex(mainnetFlag bool, rollupIndex uint32, localExitRootIndex uint32) *big.Int { - var ( - globalIndexBytes []byte - buf [globalIndexPartSize]byte - ) - if mainnetFlag { - globalIndexBytes = append(globalIndexBytes, big.NewInt(1).Bytes()...) - ri := big.NewInt(0).FillBytes(buf[:]) - globalIndexBytes = append(globalIndexBytes, ri...) - } else { - ri := big.NewInt(0).SetUint64(uint64(rollupIndex)).FillBytes(buf[:]) - globalIndexBytes = append(globalIndexBytes, ri...) - } - leri := big.NewInt(0).SetUint64(uint64(localExitRootIndex)).FillBytes(buf[:]) - globalIndexBytes = append(globalIndexBytes, leri...) - - result := big.NewInt(0).SetBytes(globalIndexBytes) - - return result -} - -// Decodes global index to its three parts: -// 1. mainnetFlag - first byte -// 2. rollupIndex - next 4 bytes -// 3. 
localExitRootIndex - last 4 bytes -// NOTE - mainnet flag is not in the global index bytes if it is false -// NOTE - rollup index is 0 if mainnet flag is true -// NOTE - rollup index is not in the global index bytes if mainnet flag is false and rollup index is 0 -func DecodeGlobalIndex(globalIndex *big.Int) (mainnetFlag bool, - rollupIndex uint32, localExitRootIndex uint32, err error) { - globalIndexBytes := globalIndex.Bytes() - l := len(globalIndexBytes) - if l > globalIndexMaxSize { - return false, 0, 0, errors.New("invalid global index length") - } - - if l == 0 { - // false, 0, 0 - return - } - - if l == globalIndexMaxSize { - // true, rollupIndex, localExitRootIndex - mainnetFlag = true - } - - localExitRootFromIdx := l - globalIndexPartSize - if localExitRootFromIdx < 0 { - localExitRootFromIdx = 0 - } - - rollupIndexFromIdx := localExitRootFromIdx - globalIndexPartSize - if rollupIndexFromIdx < 0 { - rollupIndexFromIdx = 0 - } - - rollupIndex = convertBytesToUint32(globalIndexBytes[rollupIndexFromIdx:localExitRootFromIdx]) - localExitRootIndex = convertBytesToUint32(globalIndexBytes[localExitRootFromIdx:]) - - return -} - -func convertBytesToUint32(bytes []byte) uint32 { - return uint32(big.NewInt(0).SetBytes(bytes).Uint64()) -} - -func (p *processor) isHalted() bool { - p.mu.RLock() - defer p.mu.RUnlock() - return p.halted -} diff --git a/bridgesync/processor_test.go b/bridgesync/processor_test.go deleted file mode 100644 index b2bcd1116..000000000 --- a/bridgesync/processor_test.go +++ /dev/null @@ -1,867 +0,0 @@ -package bridgesync - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math/big" - "os" - "path" - "slices" - "testing" - - migrationsBridge "github.com/0xPolygon/cdk/bridgesync/migrations" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree/testvectors" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - 
"github.com/russross/meddler" - "github.com/stretchr/testify/require" -) - -func TestBigIntString(t *testing.T) { - globalIndex := GenerateGlobalIndex(true, 0, 1093) - fmt.Println(globalIndex.String()) - - _, ok := new(big.Int).SetString(globalIndex.String(), 10) - require.True(t, ok) - - dbPath := path.Join(t.TempDir(), "bridgesyncTestBigIntString.sqlite") - - err := migrationsBridge.RunMigrations(dbPath) - require.NoError(t, err) - db, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - - ctx := context.Background() - tx, err := db.BeginTx(ctx, nil) - require.NoError(t, err) - - claim := &Claim{ - BlockNum: 1, - BlockPos: 0, - GlobalIndex: GenerateGlobalIndex(true, 0, 1093), - OriginNetwork: 11, - Amount: big.NewInt(11), - OriginAddress: common.HexToAddress("0x11"), - DestinationAddress: common.HexToAddress("0x11"), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - GlobalExitRoot: common.Hash{}, - DestinationNetwork: 12, - } - - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, claim.BlockNum) - require.NoError(t, err) - require.NoError(t, meddler.Insert(tx, "claim", claim)) - - require.NoError(t, tx.Commit()) - - tx, err = db.BeginTx(ctx, nil) - require.NoError(t, err) - - rows, err := tx.Query(` - SELECT * FROM claim - WHERE block_num >= $1 AND block_num <= $2; - `, claim.BlockNum, claim.BlockNum) - require.NoError(t, err) - - claimsFromDB := []*Claim{} - require.NoError(t, meddler.ScanAll(rows, &claimsFromDB)) - require.Len(t, claimsFromDB, 1) - require.Equal(t, claim, claimsFromDB[0]) -} - -func TestProceessor(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestProceessor.sqlite") - logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, logger) - require.NoError(t, err) - actions := []processAction{ - // processed: ~ - &getLastProcessedBlockAction{ - p: p, - description: "on an empty processor", - ctx: 
context.Background(), - expectedLastProcessedBlock: 0, - expectedErr: nil, - }, - &reorgAction{ - p: p, - description: "on an empty processor: firstReorgedBlock = 0", - firstReorgedBlock: 0, - expectedErr: nil, - }, - &reorgAction{ - p: p, - description: "on an empty processor: firstReorgedBlock = 1", - firstReorgedBlock: 1, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "on an empty processor", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedClaims: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), - }, - &getBridges{ - p: p, - description: "on an empty processor", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedBridges: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), - }, - &processBlockAction{ - p: p, - description: "block1", - block: block1, - expectedErr: nil, - }, - // processed: block1 - &getLastProcessedBlockAction{ - p: p, - description: "after block1", - ctx: context.Background(), - expectedLastProcessedBlock: 1, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block1: range 0, 2", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedClaims: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), - }, - &getBridges{ - p: p, - description: "after block1: range 0, 2", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedBridges: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 1), - }, - &getClaims{ - p: p, - description: "after block1: range 1, 1", - ctx: context.Background(), - fromBlock: 1, - toBlock: 1, - expectedClaims: eventsToClaims(block1.Events), - expectedErr: nil, - }, - &getBridges{ - p: p, - description: "after block1: range 1, 1", - ctx: context.Background(), - fromBlock: 1, - toBlock: 1, - expectedBridges: eventsToBridges(block1.Events), - expectedErr: nil, - }, - &reorgAction{ - p: p, - description: "after block1", - firstReorgedBlock: 1, - expectedErr: nil, - }, - // processed: 
~ - &getClaims{ - p: p, - description: "after block1 reorged", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedClaims: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), - }, - &getBridges{ - p: p, - description: "after block1 reorged", - ctx: context.Background(), - fromBlock: 0, - toBlock: 2, - expectedBridges: nil, - expectedErr: fmt.Errorf(errBlockNotProcessedFormat, 2, 0), - }, - &processBlockAction{ - p: p, - description: "block1 (after it's reorged)", - block: block1, - expectedErr: nil, - }, - // processed: block3 - &processBlockAction{ - p: p, - description: "block3", - block: block3, - expectedErr: nil, - }, - // processed: block1, block3 - &getLastProcessedBlockAction{ - p: p, - description: "after block3", - ctx: context.Background(), - expectedLastProcessedBlock: 3, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block3: range 2, 2", - ctx: context.Background(), - fromBlock: 2, - toBlock: 2, - expectedClaims: []Claim{}, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block3: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedClaims: append( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events)..., - ), - expectedErr: nil, - }, - &getBridges{ - p: p, - description: "after block3: range 2, 2", - ctx: context.Background(), - fromBlock: 2, - toBlock: 2, - expectedBridges: []Bridge{}, - expectedErr: nil, - }, - &getBridges{ - p: p, - description: "after block3: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedBridges: append( - eventsToBridges(block1.Events), - eventsToBridges(block3.Events)..., - ), - expectedErr: nil, - }, - &reorgAction{ - p: p, - description: "after block3, with value 3", - firstReorgedBlock: 3, - expectedErr: nil, - }, - // processed: block1 - &getLastProcessedBlockAction{ - p: p, - description: "after block3 reorged", - ctx: context.Background(), - expectedLastProcessedBlock: 1, - 
expectedErr: nil, - }, - &reorgAction{ - p: p, - description: "after block3, with value 2", - firstReorgedBlock: 2, - expectedErr: nil, - }, - &getLastProcessedBlockAction{ - p: p, - description: "after block2 reorged", - ctx: context.Background(), - expectedLastProcessedBlock: 1, - expectedErr: nil, - }, - &processBlockAction{ - p: p, - description: "block3 after reorg", - block: block3, - expectedErr: nil, - }, - // processed: block1, block3 - &processBlockAction{ - p: p, - description: "block4", - block: block4, - expectedErr: nil, - }, - // processed: block1, block3, block4 - &processBlockAction{ - p: p, - description: "block5", - block: block5, - expectedErr: nil, - }, - // processed: block1, block3, block4, block5 - &getLastProcessedBlockAction{ - p: p, - description: "after block5", - ctx: context.Background(), - expectedLastProcessedBlock: 5, - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block5: range 1, 3", - ctx: context.Background(), - fromBlock: 1, - toBlock: 3, - expectedClaims: append( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events)..., - ), - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block5: range 4, 5", - ctx: context.Background(), - fromBlock: 4, - toBlock: 5, - expectedClaims: append( - eventsToClaims(block4.Events), - eventsToClaims(block5.Events)..., - ), - expectedErr: nil, - }, - &getClaims{ - p: p, - description: "after block5: range 0, 5", - ctx: context.Background(), - fromBlock: 0, - toBlock: 5, - expectedClaims: slices.Concat( - eventsToClaims(block1.Events), - eventsToClaims(block3.Events), - eventsToClaims(block4.Events), - eventsToClaims(block5.Events), - ), - expectedErr: nil, - }, - } - - for _, a := range actions { - log.Debugf("%s: %s", a.method(), a.desc()) - a.execute(t) - } -} - -// BOILERPLATE - -// blocks - -var ( - block1 = sync.Block{ - Num: 1, - Events: []interface{}{ - Event{Bridge: &Bridge{ - BlockNum: 1, - BlockPos: 0, - LeafType: 1, - OriginNetwork: 1, - 
OriginAddress: common.HexToAddress("01"), - DestinationNetwork: 1, - DestinationAddress: common.HexToAddress("01"), - Amount: big.NewInt(1), - Metadata: common.Hex2Bytes("01"), - DepositCount: 0, - }}, - Event{Claim: &Claim{ - BlockNum: 1, - BlockPos: 1, - GlobalIndex: big.NewInt(1), - OriginNetwork: 1, - OriginAddress: common.HexToAddress("01"), - DestinationAddress: common.HexToAddress("01"), - Amount: big.NewInt(1), - }}, - }, - } - block3 = sync.Block{ - Num: 3, - Events: []interface{}{ - Event{Bridge: &Bridge{ - BlockNum: 3, - BlockPos: 0, - LeafType: 2, - OriginNetwork: 2, - OriginAddress: common.HexToAddress("02"), - DestinationNetwork: 2, - DestinationAddress: common.HexToAddress("02"), - Amount: big.NewInt(2), - Metadata: common.Hex2Bytes("02"), - DepositCount: 1, - }}, - Event{Bridge: &Bridge{ - BlockNum: 3, - BlockPos: 1, - LeafType: 3, - OriginNetwork: 3, - OriginAddress: common.HexToAddress("03"), - DestinationNetwork: 3, - DestinationAddress: common.HexToAddress("03"), - Amount: big.NewInt(0), - Metadata: common.Hex2Bytes("03"), - DepositCount: 2, - }}, - }, - } - block4 = sync.Block{ - Num: 4, - Events: []interface{}{}, - } - block5 = sync.Block{ - Num: 5, - Events: []interface{}{ - Event{Claim: &Claim{ - BlockNum: 4, - BlockPos: 0, - GlobalIndex: big.NewInt(4), - OriginNetwork: 4, - OriginAddress: common.HexToAddress("04"), - DestinationAddress: common.HexToAddress("04"), - Amount: big.NewInt(4), - }}, - Event{Claim: &Claim{ - BlockNum: 4, - BlockPos: 1, - GlobalIndex: big.NewInt(5), - OriginNetwork: 5, - OriginAddress: common.HexToAddress("05"), - DestinationAddress: common.HexToAddress("05"), - Amount: big.NewInt(5), - }}, - }, - } -) - -// actions - -type processAction interface { - method() string - desc() string - execute(t *testing.T) -} - -// GetClaims - -type getClaims struct { - p *processor - description string - ctx context.Context - fromBlock uint64 - toBlock uint64 - expectedClaims []Claim - expectedErr error -} - -func (a *getClaims) 
method() string { - return "GetClaims" -} - -func (a *getClaims) desc() string { - return a.description -} - -func (a *getClaims) execute(t *testing.T) { - t.Helper() - actualEvents, actualErr := a.p.GetClaims(a.ctx, a.fromBlock, a.toBlock) - require.Equal(t, a.expectedErr, actualErr) - require.Equal(t, a.expectedClaims, actualEvents) -} - -// GetBridges - -type getBridges struct { - p *processor - description string - ctx context.Context - fromBlock uint64 - toBlock uint64 - expectedBridges []Bridge - expectedErr error -} - -func (a *getBridges) method() string { - return "GetBridges" -} - -func (a *getBridges) desc() string { - return a.description -} - -func (a *getBridges) execute(t *testing.T) { - t.Helper() - actualEvents, actualErr := a.p.GetBridges(a.ctx, a.fromBlock, a.toBlock) - require.Equal(t, a.expectedBridges, actualEvents) - require.Equal(t, a.expectedErr, actualErr) -} - -// getLastProcessedBlock - -type getLastProcessedBlockAction struct { - p *processor - description string - ctx context.Context - expectedLastProcessedBlock uint64 - expectedErr error -} - -func (a *getLastProcessedBlockAction) method() string { - return "getLastProcessedBlock" -} - -func (a *getLastProcessedBlockAction) desc() string { - return a.description -} - -func (a *getLastProcessedBlockAction) execute(t *testing.T) { - t.Helper() - - actualLastProcessedBlock, actualErr := a.p.GetLastProcessedBlock(a.ctx) - require.Equal(t, a.expectedLastProcessedBlock, actualLastProcessedBlock) - require.Equal(t, a.expectedErr, actualErr) -} - -// reorg - -type reorgAction struct { - p *processor - description string - firstReorgedBlock uint64 - expectedErr error -} - -func (a *reorgAction) method() string { - return "reorg" -} - -func (a *reorgAction) desc() string { - return a.description -} - -func (a *reorgAction) execute(t *testing.T) { - t.Helper() - - actualErr := a.p.Reorg(context.Background(), a.firstReorgedBlock) - require.Equal(t, a.expectedErr, actualErr) -} - -// 
storeBridgeEvents - -type processBlockAction struct { - p *processor - description string - block sync.Block - expectedErr error -} - -func (a *processBlockAction) method() string { - return "storeBridgeEvents" -} - -func (a *processBlockAction) desc() string { - return a.description -} - -func (a *processBlockAction) execute(t *testing.T) { - t.Helper() - - actualErr := a.p.ProcessBlock(context.Background(), a.block) - require.Equal(t, a.expectedErr, actualErr) -} - -func eventsToBridges(events []interface{}) []Bridge { - bridges := []Bridge{} - for _, event := range events { - e, ok := event.(Event) - if !ok { - panic("should be ok") - } - if e.Bridge != nil { - bridges = append(bridges, *e.Bridge) - } - } - return bridges -} - -func eventsToClaims(events []interface{}) []Claim { - claims := []Claim{} - for _, event := range events { - e, ok := event.(Event) - if !ok { - panic("should be ok") - } - if e.Claim != nil { - claims = append(claims, *e.Claim) - } - } - return claims -} - -func TestHashBridge(t *testing.T) { - data, err := os.ReadFile("../tree/testvectors/leaf-vectors.json") - require.NoError(t, err) - - var leafVectors []testvectors.DepositVectorRaw - err = json.Unmarshal(data, &leafVectors) - require.NoError(t, err) - - for ti, testVector := range leafVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - amount, err := big.NewInt(0).SetString(testVector.Amount, 0) - require.True(t, err) - - bridge := Bridge{ - OriginNetwork: testVector.OriginNetwork, - OriginAddress: common.HexToAddress(testVector.TokenAddress), - Amount: amount, - DestinationNetwork: testVector.DestinationNetwork, - DestinationAddress: common.HexToAddress(testVector.DestinationAddress), - DepositCount: uint32(ti + 1), - Metadata: common.FromHex(testVector.Metadata), - } - require.Equal(t, common.HexToHash(testVector.ExpectedHash), bridge.Hash()) - }) - } -} - -func TestDecodeGlobalIndex(t *testing.T) { - t.Parallel() - - tests := []struct { - name string - 
globalIndex *big.Int - expectedMainnetFlag bool - expectedRollupIndex uint32 - expectedLocalIndex uint32 - expectedErr error - }{ - { - name: "Mainnet flag true, rollup index 0", - globalIndex: GenerateGlobalIndex(true, 0, 2), - expectedMainnetFlag: true, - expectedRollupIndex: 0, - expectedLocalIndex: 2, - expectedErr: nil, - }, - { - name: "Mainnet flag true, indexes 0", - globalIndex: GenerateGlobalIndex(true, 0, 0), - expectedMainnetFlag: true, - expectedRollupIndex: 0, - expectedLocalIndex: 0, - expectedErr: nil, - }, - { - name: "Mainnet flag false, rollup index 0", - globalIndex: GenerateGlobalIndex(false, 0, 2), - expectedMainnetFlag: false, - expectedRollupIndex: 0, - expectedLocalIndex: 2, - expectedErr: nil, - }, - { - name: "Mainnet flag false, rollup index non-zero", - globalIndex: GenerateGlobalIndex(false, 11, 0), - expectedMainnetFlag: false, - expectedRollupIndex: 11, - expectedLocalIndex: 0, - expectedErr: nil, - }, - { - name: "Mainnet flag false, indexes 0", - globalIndex: GenerateGlobalIndex(false, 0, 0), - expectedMainnetFlag: false, - expectedRollupIndex: 0, - expectedLocalIndex: 0, - expectedErr: nil, - }, - { - name: "Mainnet flag false, indexes non zero", - globalIndex: GenerateGlobalIndex(false, 1231, 111234), - expectedMainnetFlag: false, - expectedRollupIndex: 1231, - expectedLocalIndex: 111234, - expectedErr: nil, - }, - { - name: "Invalid global index length", - globalIndex: big.NewInt(0).SetBytes([]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}), - expectedMainnetFlag: false, - expectedRollupIndex: 0, - expectedLocalIndex: 0, - expectedErr: errors.New("invalid global index length"), - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - mainnetFlag, rollupIndex, localExitRootIndex, err := DecodeGlobalIndex(tt.globalIndex) - if tt.expectedErr != nil { - require.EqualError(t, err, tt.expectedErr.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.expectedMainnetFlag, 
mainnetFlag) - require.Equal(t, tt.expectedRollupIndex, rollupIndex) - require.Equal(t, tt.expectedLocalIndex, localExitRootIndex) - }) - } -} - -func TestInsertAndGetClaim(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestInsertAndGetClaim.sqlite") - log.Debugf("sqlite path: %s", path) - err := migrationsBridge.RunMigrations(path) - require.NoError(t, err) - logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, logger) - require.NoError(t, err) - - tx, err := p.db.BeginTx(context.Background(), nil) - require.NoError(t, err) - - // insert test claim - testClaim := &Claim{ - BlockNum: 1, - BlockPos: 0, - GlobalIndex: GenerateGlobalIndex(true, 0, 1093), - OriginNetwork: 11, - OriginAddress: common.HexToAddress("0x11"), - DestinationAddress: common.HexToAddress("0x11"), - Amount: big.NewInt(11), - ProofLocalExitRoot: types.Proof{}, - ProofRollupExitRoot: types.Proof{}, - MainnetExitRoot: common.Hash{}, - RollupExitRoot: common.Hash{}, - GlobalExitRoot: common.Hash{}, - DestinationNetwork: 12, - Metadata: []byte("0x11"), - IsMessage: false, - } - - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, testClaim.BlockNum) - require.NoError(t, err) - require.NoError(t, meddler.Insert(tx, "claim", testClaim)) - - require.NoError(t, tx.Commit()) - - // get test claim - claims, err := p.GetClaims(context.Background(), 1, 1) - require.NoError(t, err) - require.Len(t, claims, 1) - require.Equal(t, testClaim, &claims[0]) -} - -func TestGetBridgesPublished(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - fromBlock uint64 - toBlock uint64 - bridges []Bridge - lastUpdatedDepositCount uint32 - expectedBridges []Bridge - expectedError error - }{ - { - name: "no bridges", - fromBlock: 1, - toBlock: 10, - bridges: []Bridge{}, - lastUpdatedDepositCount: 0, - expectedBridges: []Bridge{}, - expectedError: nil, - }, - { - name: "bridges within deposit count", - fromBlock: 1, - toBlock: 10, - bridges: []Bridge{ - 
{DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, - {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, - }, - lastUpdatedDepositCount: 2, - expectedBridges: []Bridge{ - {DepositCount: 1, BlockNum: 1, Amount: big.NewInt(1)}, - {DepositCount: 2, BlockNum: 2, Amount: big.NewInt(1)}, - }, - expectedError: nil, - }, - } - - for _, tc := range testCases { - tc := tc - - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - path := path.Join(t.TempDir(), fmt.Sprintf("bridgesyncTestGetBridgesPublished_%s.sqlite", tc.name)) - require.NoError(t, migrationsBridge.RunMigrations(path)) - logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, logger) - require.NoError(t, err) - - tx, err := p.db.BeginTx(context.Background(), nil) - require.NoError(t, err) - - for i := tc.fromBlock; i <= tc.toBlock; i++ { - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, i) - require.NoError(t, err) - } - - for _, bridge := range tc.bridges { - require.NoError(t, meddler.Insert(tx, "bridge", &bridge)) - } - - require.NoError(t, tx.Commit()) - - ctx := context.Background() - bridges, err := p.GetBridgesPublished(ctx, tc.fromBlock, tc.toBlock) - - if tc.expectedError != nil { - require.Equal(t, tc.expectedError, err) - } else { - require.NoError(t, err) - require.Equal(t, tc.expectedBridges, bridges) - } - }) - } -} - -func TestProcessBlockInvalidIndex(t *testing.T) { - path := path.Join(t.TempDir(), "aggsenderTestProceessor.sqlite") - logger := log.WithFields("bridge-syncer", "foo") - p, err := newProcessor(path, logger) - require.NoError(t, err) - err = p.ProcessBlock(context.Background(), sync.Block{ - Num: 0, - Events: []interface{}{ - Event{Bridge: &Bridge{DepositCount: 5}}, - }, - }) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) - require.True(t, p.halted) - err = p.ProcessBlock(context.Background(), sync.Block{}) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} diff --git a/build/docker-compose.yml 
b/build/docker-compose.yml deleted file mode 100644 index e69de29bb..000000000 diff --git a/claimsponsor/claimsponsor.go b/claimsponsor/claimsponsor.go deleted file mode 100644 index e93f18988..000000000 --- a/claimsponsor/claimsponsor.go +++ /dev/null @@ -1,246 +0,0 @@ -package claimsponsor - -import ( - "context" - "database/sql" - "errors" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk/claimsponsor/migrations" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" -) - -type ClaimStatus string - -const ( - PendingClaimStatus ClaimStatus = "pending" - WIPClaimStatus ClaimStatus = "work in progress" - SuccessClaimStatus ClaimStatus = "success" - FailedClaimStatus ClaimStatus = "failed" -) - -var ( - ErrInvalidClaim = errors.New("invalid claim") - ErrClaimDoesntExist = errors.New("the claim requested to be updated does not exist") -) - -// Claim representation of a claim event -type Claim struct { - LeafType uint8 `meddler:"leaf_type"` - ProofLocalExitRoot tree.Proof `meddler:"proof_local_exit_root,merkleproof"` - ProofRollupExitRoot tree.Proof `meddler:"proof_rollup_exit_root,merkleproof"` - GlobalIndex *big.Int `meddler:"global_index,bigint"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - OriginNetwork uint32 `meddler:"origin_network"` - OriginTokenAddress common.Address `meddler:"origin_token_address,address"` - DestinationNetwork uint32 `meddler:"destination_network"` - DestinationAddress common.Address `meddler:"destination_address,address"` - Amount *big.Int `meddler:"amount,bigint"` - Metadata []byte `meddler:"metadata"` - Status ClaimStatus `meddler:"status"` - TxID string `meddler:"tx_id"` -} - -func (c *Claim) Key() []byte { - return c.GlobalIndex.Bytes() -} - -type ClaimSender interface { - 
checkClaim(ctx context.Context, claim *Claim) error - sendClaim(ctx context.Context, claim *Claim) (string, error) - claimStatus(ctx context.Context, id string) (ClaimStatus, error) -} - -type ClaimSponsor struct { - logger *log.Logger - db *sql.DB - sender ClaimSender - rh *sync.RetryHandler - waitTxToBeMinedPeriod time.Duration - waitOnEmptyQueue time.Duration -} - -func newClaimSponsor( - logger *log.Logger, - dbPath string, - sender ClaimSender, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - waitTxToBeMinedPeriod time.Duration, - waitOnEmptyQueue time.Duration, -) (*ClaimSponsor, error) { - err := migrations.RunMigrations(dbPath) - if err != nil { - return nil, err - } - db, err := db.NewSQLiteDB(dbPath) - if err != nil { - return nil, err - } - rh := &sync.RetryHandler{ - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - RetryAfterErrorPeriod: retryAfterErrorPeriod, - } - - return &ClaimSponsor{ - logger: logger, - db: db, - sender: sender, - rh: rh, - waitTxToBeMinedPeriod: waitTxToBeMinedPeriod, - waitOnEmptyQueue: waitOnEmptyQueue, - }, nil -} - -func (c *ClaimSponsor) Start(ctx context.Context) { - attempts := 0 - - for { - select { - case <-ctx.Done(): - return - - default: - err := c.claim(ctx) - if err != nil { - attempts++ - c.logger.Error(err) - c.rh.Handle("claimsponsor main loop", attempts) - } else { - attempts = 0 - } - } - } -} - -func (c *ClaimSponsor) claim(ctx context.Context) error { - claim, err := c.getWIPClaim() - if err != nil && !errors.Is(err, db.ErrNotFound) { - return fmt.Errorf("error getting WIP claim: %w", err) - } - if errors.Is(err, db.ErrNotFound) || claim == nil { - // there is no WIP claim, go for the next pending claim - claim, err = c.getFirstPendingClaim() - if err != nil { - if errors.Is(err, db.ErrNotFound) { - c.logger.Debugf("queue is empty") - time.Sleep(c.waitOnEmptyQueue) - return nil - } - return fmt.Errorf("error calling getClaim with globalIndex %s: %w", 
claim.GlobalIndex.String(), err) - } - txID, err := c.sender.sendClaim(ctx, claim) - if err != nil { - return fmt.Errorf("error getting sending claim: %w", err) - } - if err := c.updateClaimTxID(claim.GlobalIndex, txID); err != nil { - return fmt.Errorf("error updating claim txID: %w", err) - } - } - - c.logger.Infof("waiting for tx %s with global index %s to be processed", claim.TxID, claim.GlobalIndex.String()) - status, err := c.waitForTxResult(ctx, claim.TxID) - if err != nil { - return fmt.Errorf("error calling waitForTxResult for tx %s: %w", claim.TxID, err) - } - c.logger.Infof("tx %s with global index %s is processed, status: %s", claim.TxID, claim.GlobalIndex.String(), status) - return c.updateClaimStatus(claim.GlobalIndex, status) -} - -func (c *ClaimSponsor) getWIPClaim() (*Claim, error) { - claim := &Claim{} - err := meddler.QueryRow( - c.db, claim, - `SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`, - WIPClaimStatus, - ) - return claim, db.ReturnErrNotFound(err) -} - -func (c *ClaimSponsor) getFirstPendingClaim() (*Claim, error) { - claim := &Claim{} - err := meddler.QueryRow( - c.db, claim, - `SELECT * FROM claim WHERE status = $1 ORDER BY rowid ASC LIMIT 1;`, - PendingClaimStatus, - ) - return claim, db.ReturnErrNotFound(err) -} - -func (c *ClaimSponsor) updateClaimTxID(globalIndex *big.Int, txID string) error { - res, err := c.db.Exec( - `UPDATE claim SET tx_id = $1 WHERE global_index = $2`, - txID, globalIndex.String(), - ) - if err != nil { - return fmt.Errorf("error updating claim status: %w", err) - } - rowsAff, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected: %w", err) - } - if rowsAff == 0 { - return ErrClaimDoesntExist - } - return nil -} - -func (c *ClaimSponsor) updateClaimStatus(globalIndex *big.Int, status ClaimStatus) error { - res, err := c.db.Exec( - `UPDATE claim SET status = $1 WHERE global_index = $2`, - status, globalIndex.String(), - ) - if err != nil { - return 
fmt.Errorf("error updating claim status: %w", err) - } - rowsAff, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected: %w", err) - } - if rowsAff == 0 { - return ErrClaimDoesntExist - } - return nil -} - -func (c *ClaimSponsor) waitForTxResult(ctx context.Context, txID string) (ClaimStatus, error) { - ticker := time.NewTicker(c.waitTxToBeMinedPeriod) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return "", errors.New("context cancelled") - case <-ticker.C: - status, err := c.sender.claimStatus(ctx, txID) - if err != nil { - return "", err - } - - if status == FailedClaimStatus || status == SuccessClaimStatus { - return status, nil - } - } - } -} - -func (c *ClaimSponsor) AddClaimToQueue(claim *Claim) error { - claim.Status = PendingClaimStatus - return meddler.Insert(c.db, "claim", claim) -} - -func (c *ClaimSponsor) GetClaim(globalIndex *big.Int) (*Claim, error) { - claim := &Claim{} - err := meddler.QueryRow( - c.db, claim, `SELECT * FROM claim WHERE global_index = $1`, globalIndex.String(), - ) - return claim, db.ReturnErrNotFound(err) -} diff --git a/claimsponsor/e2e_test.go b/claimsponsor/e2e_test.go deleted file mode 100644 index 5ad332c0f..000000000 --- a/claimsponsor/e2e_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package claimsponsor_test - -import ( - "context" - "errors" - "fmt" - "math/big" - "path" - "testing" - "time" - - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestE2EL1toEVML2(t *testing.T) { - // start other needed components - ctx := context.Background() - setup := helpers.NewE2EEnvWithEVML2(t) - - // start claim sponsor - dbPathClaimSponsor := path.Join(t.TempDir(), "claimsponsorTestE2EL1toEVML2_cs.sqlite") - claimer, err := 
claimsponsor.NewEVMClaimSponsor( - log.GetDefaultLogger(), - dbPathClaimSponsor, - setup.L2Environment.SimBackend.Client(), - setup.L2Environment.BridgeAddr, - setup.L2Environment.Auth.From, - 200_000, - 0, - setup.EthTxManagerMock, - 0, 0, time.Millisecond*10, time.Millisecond*10, - ) - require.NoError(t, err) - go claimer.Start(ctx) - - // test - for i := uint32(0); i < 3; i++ { - // Send bridges to L2, wait for GER to be injected on L2 - amount := new(big.Int).SetUint64(uint64(i) + 1) - setup.L1Environment.Auth.Value = amount - _, err := setup.L1Environment.BridgeContract.BridgeAsset(setup.L1Environment.Auth, setup.NetworkIDL2, setup.L2Environment.Auth.From, amount, common.Address{}, true, nil) - require.NoError(t, err) - setup.L1Environment.SimBackend.Commit() - time.Sleep(time.Millisecond * 300) - - expectedGER, err := setup.L1Environment.GERContract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - isInjected, err := setup.L2Environment.AggoracleSender.IsGERInjected(expectedGER) - require.NoError(t, err) - require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) - - // Build MP using bridgeSyncL1 & env.InfoTreeSync - info, err := setup.L1Environment.InfoTreeSync.GetInfoByIndex(ctx, i) - require.NoError(t, err) - - localProof, err := setup.L1Environment.BridgeSync.GetProof(ctx, i, info.MainnetExitRoot) - require.NoError(t, err) - - rollupProof, err := setup.L1Environment.InfoTreeSync.GetRollupExitTreeMerkleProof(ctx, 0, common.Hash{}) - require.NoError(t, err) - - // Request to sponsor claim - globalIndex := bridgesync.GenerateGlobalIndex(true, 0, i) - err = claimer.AddClaimToQueue(&claimsponsor.Claim{ - LeafType: claimsponsor.LeafTypeAsset, - ProofLocalExitRoot: localProof, - ProofRollupExitRoot: rollupProof, - GlobalIndex: globalIndex, - MainnetExitRoot: info.MainnetExitRoot, - RollupExitRoot: info.RollupExitRoot, - OriginNetwork: 0, - OriginTokenAddress: common.Address{}, - 
DestinationNetwork: setup.NetworkIDL2, - DestinationAddress: setup.L2Environment.Auth.From, - Amount: amount, - Metadata: nil, - }) - require.NoError(t, err) - - // Wait until success - succeed := false - for i := 0; i < 10; i++ { - claim, err := claimer.GetClaim(globalIndex) - require.NoError(t, err) - if claim.Status == claimsponsor.FailedClaimStatus { - require.NoError(t, errors.New("claim failed")) - } else if claim.Status == claimsponsor.SuccessClaimStatus { - succeed = true - - break - } - time.Sleep(100 * time.Millisecond) - } - require.True(t, succeed) - - // Check on contract that is claimed - isClaimed, err := setup.L2Environment.BridgeContract.IsClaimed(&bind.CallOpts{Pending: false}, i, 0) - require.NoError(t, err) - require.True(t, isClaimed) - } -} diff --git a/claimsponsor/evmclaimsponsor.go b/claimsponsor/evmclaimsponsor.go deleted file mode 100644 index 8915c6836..000000000 --- a/claimsponsor/evmclaimsponsor.go +++ /dev/null @@ -1,215 +0,0 @@ -package claimsponsor - -import ( - "context" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmbridgev2" - configTypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" - ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -const ( - // LeafTypeAsset represents a bridge asset - LeafTypeAsset uint8 = 0 - // LeafTypeMessage represents a bridge message - LeafTypeMessage uint8 = 1 - - gasTooHighErrTemplate = "Claim tx estimated to consume more gas than the maximum allowed by the service. 
" + - "Estimated %d, maximum allowed: %d" -) - -type EthClienter interface { - ethereum.GasEstimator - bind.ContractBackend -} - -type EthTxManager interface { - Remove(ctx context.Context, id common.Hash) error - ResultsByStatus(ctx context.Context, statuses []ethtxtypes.MonitoredTxStatus) ([]ethtxtypes.MonitoredTxResult, error) - Result(ctx context.Context, id common.Hash) (ethtxtypes.MonitoredTxResult, error) - Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, - gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) -} - -type EVMClaimSponsor struct { - l2Client EthClienter - bridgeABI *abi.ABI - bridgeAddr common.Address - ethTxManager EthTxManager - sender common.Address - gasOffest uint64 - maxGas uint64 -} - -type EVMClaimSponsorConfig struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // Enabled indicates if the sponsor should be run or not - Enabled bool `mapstructure:"Enabled"` - // SenderAddr is the address that will be used to send the claim txs - SenderAddr common.Address `mapstructure:"SenderAddr"` - // BridgeAddrL2 is the address of the bridge smart contract on L2 - BridgeAddrL2 common.Address `mapstructure:"BridgeAddrL2"` - // MaxGas is the max gas (limit) allowed for a claim to be sponsored - MaxGas uint64 `mapstructure:"MaxGas"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod configTypes.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. 
- // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitTxToBeMinedPeriod is the period that will be used to ask if a given tx has been mined (or failed) - WaitTxToBeMinedPeriod configTypes.Duration `mapstructure:"WaitTxToBeMinedPeriod"` - // WaitOnEmptyQueue is the time that will be waited before trying to send the next claim of the queue - // if the queue is empty - WaitOnEmptyQueue configTypes.Duration `mapstructure:"WaitOnEmptyQueue"` - // EthTxManager is the configuration of the EthTxManager to be used by the claim sponsor - EthTxManager ethtxmanager.Config `mapstructure:"EthTxManager"` - // GasOffset is the gas to add on top of the estimated gas when sending the claim txs - GasOffset uint64 `mapstructure:"GasOffset"` -} - -func NewEVMClaimSponsor( - logger *log.Logger, - dbPath string, - l2Client EthClienter, - bridgeAddr common.Address, - sender common.Address, - maxGas, gasOffset uint64, - ethTxManager EthTxManager, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - waitTxToBeMinedPeriod time.Duration, - waitOnEmptyQueue time.Duration, -) (*ClaimSponsor, error) { - abi, err := polygonzkevmbridgev2.Polygonzkevmbridgev2MetaData.GetAbi() - if err != nil { - return nil, err - } - - evmSponsor := &EVMClaimSponsor{ - l2Client: l2Client, - bridgeABI: abi, - bridgeAddr: bridgeAddr, - sender: sender, - gasOffest: gasOffset, - maxGas: maxGas, - ethTxManager: ethTxManager, - } - - baseSponsor, err := newClaimSponsor( - logger, - dbPath, - evmSponsor, - retryAfterErrorPeriod, - maxRetryAttemptsAfterError, - waitTxToBeMinedPeriod, - waitOnEmptyQueue, - ) - if err != nil { - return nil, err - } - - return baseSponsor, nil -} - -func (c *EVMClaimSponsor) checkClaim(ctx context.Context, claim *Claim) error { - data, err := c.buildClaimTxData(claim) - if err != nil { - return err - } - gas, err := c.l2Client.EstimateGas(ctx, ethereum.CallMsg{ - 
From: c.sender, - To: &c.bridgeAddr, - Data: data, - }) - if err != nil { - return err - } - if gas > c.maxGas { - return fmt.Errorf(gasTooHighErrTemplate, gas, c.maxGas) - } - - return nil -} - -func (c *EVMClaimSponsor) sendClaim(ctx context.Context, claim *Claim) (string, error) { - data, err := c.buildClaimTxData(claim) - if err != nil { - return "", err - } - id, err := c.ethTxManager.Add(ctx, &c.bridgeAddr, common.Big0, data, c.gasOffest, nil) - if err != nil { - return "", err - } - - return id.Hex(), nil -} - -func (c *EVMClaimSponsor) claimStatus(ctx context.Context, id string) (ClaimStatus, error) { - res, err := c.ethTxManager.Result(ctx, common.HexToHash(id)) - if err != nil { - return "", err - } - switch res.Status { - case ethtxtypes.MonitoredTxStatusCreated, - ethtxtypes.MonitoredTxStatusSent: - return WIPClaimStatus, nil - case ethtxtypes.MonitoredTxStatusFailed: - return FailedClaimStatus, nil - case ethtxtypes.MonitoredTxStatusMined, - ethtxtypes.MonitoredTxStatusSafe, - ethtxtypes.MonitoredTxStatusFinalized: - log.Infof("claim tx with id %s mined at block %d", id, res.MinedAtBlockNumber) - - return SuccessClaimStatus, nil - default: - return "", fmt.Errorf("unexpected tx status: %v", res.Status) - } -} - -func (c *EVMClaimSponsor) buildClaimTxData(claim *Claim) ([]byte, error) { - switch claim.LeafType { - case LeafTypeAsset: - return c.bridgeABI.Pack( - "claimAsset", - claim.ProofLocalExitRoot, // bytes32[32] smtProofLocalExitRoot - claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot - claim.GlobalIndex, // uint256 globalIndex - claim.MainnetExitRoot, // bytes32 mainnetExitRoot - claim.RollupExitRoot, // bytes32 rollupExitRoot - claim.OriginNetwork, // uint32 originNetwork - claim.OriginTokenAddress, // address originTokenAddress, - claim.DestinationNetwork, // uint32 destinationNetwork - claim.DestinationAddress, // address destinationAddress - claim.Amount, // uint256 amount - claim.Metadata, // bytes metadata - ) - case 
LeafTypeMessage: - return c.bridgeABI.Pack( - "claimMessage", - claim.ProofLocalExitRoot, // bytes32[32] smtProofLocalExitRoot - claim.ProofRollupExitRoot, // bytes32[32] smtProofRollupExitRoot - claim.GlobalIndex, // uint256 globalIndex - claim.MainnetExitRoot, // bytes32 mainnetExitRoot - claim.RollupExitRoot, // bytes32 rollupExitRoot - claim.OriginNetwork, // uint32 originNetwork - claim.OriginTokenAddress, // address originTokenAddress, - claim.DestinationNetwork, // uint32 destinationNetwork - claim.DestinationAddress, // address destinationAddress - claim.Amount, // uint256 amount - claim.Metadata, // bytes metadata - ) - default: - return nil, fmt.Errorf("unexpected leaf type %d", claim.LeafType) - } -} diff --git a/claimsponsor/migrations/claimsponsor0001.sql b/claimsponsor/migrations/claimsponsor0001.sql deleted file mode 100644 index 9e4586ea3..000000000 --- a/claimsponsor/migrations/claimsponsor0001.sql +++ /dev/null @@ -1,20 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS claim; - --- +migrate Up -CREATE TABLE claim ( - leaf_type INT NOT NULL, - proof_local_exit_root VARCHAR NOT NULL, - proof_rollup_exit_root VARCHAR NOT NULL, - global_index VARCHAR NOT NULL, - mainnet_exit_root VARCHAR NOT NULL, - rollup_exit_root VARCHAR NOT NULL, - origin_network INT NOT NULL, - origin_token_address VARCHAR NOT NULL, - destination_network INT NOT NULL, - destination_address VARCHAR NOT NULL, - amount VARCHAR NOT NULL, - metadata VARCHAR, - status VARCHAR NOT NULL, - tx_id VARCHAR NOT NULL -); \ No newline at end of file diff --git a/claimsponsor/migrations/migrations.go b/claimsponsor/migrations/migrations.go deleted file mode 100644 index 9166b5b3a..000000000 --- a/claimsponsor/migrations/migrations.go +++ /dev/null @@ -1,21 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" -) - -//go:embed claimsponsor0001.sql -var mig001 string - -func RunMigrations(dbPath string) error { - migrations := 
[]types.Migration{ - { - ID: "claimsponsor0001", - SQL: mig001, - }, - } - return db.RunMigrations(dbPath, migrations) -} diff --git a/cmd/main.go b/cmd/main.go index 2b412f90a..3aa921444 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -6,17 +6,12 @@ import ( zkevm "github.com/0xPolygon/cdk" "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/urfave/cli/v2" ) const appName = "cdk" -const ( - // NETWORK_CONFIGFILE name to identify the network_custom (genesis) config-file - NETWORK_CONFIGFILE = "custom_network" //nolint:stylecheck -) - var ( configFileFlag = cli.StringSliceFlag{ Name: config.FlagCfg, @@ -41,8 +36,7 @@ var ( Aliases: []string{"co"}, Usage: "List of components to run", Required: false, - Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR, - common.AGGORACLE, common.BRIDGE, common.AGGSENDER), + Value: cli.NewStringSlice(common.SEQUENCE_SENDER, common.AGGREGATOR), } saveConfigFlag = cli.StringFlag{ Name: config.FlagSaveConfigPath, diff --git a/cmd/run.go b/cmd/run.go index a3b222c2a..7de38f724 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -12,14 +12,8 @@ import ( zkevm "github.com/0xPolygon/cdk" dataCommitteeClient "github.com/0xPolygon/cdk-data-availability/client" jRPC "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/aggoracle/chaingersender" "github.com/0xPolygon/cdk/aggregator" "github.com/0xPolygon/cdk/aggregator/db" - "github.com/0xPolygon/cdk/aggsender" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/claimsponsor" cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/config" "github.com/0xPolygon/cdk/dataavailability" @@ -27,18 +21,15 @@ import ( "github.com/0xPolygon/cdk/etherman" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" - 
"github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/rpc" "github.com/0xPolygon/cdk/sequencesender" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/translator" ethtxman "github.com/0xPolygon/zkevm-ethtx-manager/etherman" "github.com/0xPolygon/zkevm-ethtx-manager/etherman/etherscan" - "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" - ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" + aggkitetherman "github.com/agglayer/aggkit/etherman" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/reorgdetector" "github.com/ethereum/go-ethereum/ethclient" "github.com/urfave/cli/v2" ) @@ -60,7 +51,6 @@ func start(cliCtx *cli.Context) error { components := cliCtx.StringSlice(config.FlagComponents) l1Client := runL1ClientIfNeeded(components, cfg.Etherman.URL) - l2Client := runL2ClientIfNeeded(components, getL2RPCUrl(cfg)) reorgDetectorL1, errChanL1 := runReorgDetectorL1IfNeeded(cliCtx.Context, components, l1Client, &cfg.ReorgDetectorL1) go func() { if err := <-errChanL1; err != nil { @@ -68,23 +58,7 @@ func start(cliCtx *cli.Context) error { } }() - reorgDetectorL2, errChanL2 := runReorgDetectorL2IfNeeded(cliCtx.Context, components, l2Client, &cfg.ReorgDetectorL2) - go func() { - if err := <-errChanL2; err != nil { - log.Fatal("Error from ReorgDetectorL2: ", err) - } - }() - - rollupID := getRollUpIDIfNeeded(components, cfg.NetworkConfig.L1Config, l1Client) l1InfoTreeSync := runL1InfoTreeSyncerIfNeeded(cliCtx.Context, components, *cfg, l1Client, reorgDetectorL1) - claimSponsor := runClaimSponsorIfNeeded(cliCtx.Context, components, l2Client, cfg.ClaimSponsor) - l1BridgeSync := runBridgeSyncL1IfNeeded(cliCtx.Context, components, cfg.BridgeL1Sync, reorgDetectorL1, - l1Client, 0) - l2BridgeSync := runBridgeSyncL2IfNeeded(cliCtx.Context, components, 
cfg.BridgeL2Sync, reorgDetectorL2, - l2Client, rollupID) - lastGERSync := runLastGERSyncIfNeeded( - cliCtx.Context, components, cfg.LastGERSync, reorgDetectorL2, l2Client, l1InfoTreeSync, - ) var rpcServices []jRPC.Service for _, component := range components { switch component { @@ -103,35 +77,6 @@ func start(cliCtx *cli.Context) error { log.Fatal(err) } }() - case cdkcommon.AGGORACLE: - aggOracle := createAggoracle(*cfg, l1Client, l2Client, l1InfoTreeSync) - go aggOracle.Start(cliCtx.Context) - case cdkcommon.BRIDGE: - rpcBridge := createBridgeRPC( - cfg.RPC, - cfg.Common.NetworkID, - claimSponsor, - l1InfoTreeSync, - lastGERSync, - l1BridgeSync, - l2BridgeSync, - ) - rpcServices = append(rpcServices, rpcBridge...) - - case cdkcommon.AGGSENDER: - aggsender, err := createAggSender( - cliCtx.Context, - cfg.AggSender, - l1Client, - l1InfoTreeSync, - l2BridgeSync, - ) - if err != nil { - log.Fatal(err) - } - rpcServices = append(rpcServices, aggsender.GetRPCServices()...) - - go aggsender.Start(cliCtx.Context) } } if len(rpcServices) > 0 { @@ -147,40 +92,6 @@ func start(cliCtx *cli.Context) error { return nil } -func createAggSender( - ctx context.Context, - cfg aggsender.Config, - l1EthClient *ethclient.Client, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - l2Syncer *bridgesync.BridgeSync) (*aggsender.AggSender, error) { - logger := log.WithFields("module", cdkcommon.AGGSENDER) - agglayerClient := agglayer.NewAggLayerClient(cfg.AggLayerURL) - blockNotifier, err := aggsender.NewBlockNotifierPolling(l1EthClient, aggsender.ConfigBlockNotifierPolling{ - BlockFinalityType: etherman.BlockNumberFinality(cfg.BlockFinality), - CheckNewBlockInterval: aggsender.AutomaticBlockInterval, - }, logger, nil) - if err != nil { - return nil, err - } - - notifierCfg, err := aggsender.NewConfigEpochNotifierPerBlock(agglayerClient, cfg.EpochNotificationPercentage) - if err != nil { - return nil, fmt.Errorf("cant generate config for Epoch Notifier because: %w", err) - } - 
epochNotifier, err := aggsender.NewEpochNotifierPerBlock( - blockNotifier, - logger, - *notifierCfg, nil) - if err != nil { - return nil, err - } - log.Infof("Starting blockNotifier: %s", blockNotifier.String()) - go blockNotifier.Start(ctx) - log.Infof("Starting epochNotifier: %s", epochNotifier.String()) - go epochNotifier.Start(ctx) - return aggsender.New(ctx, logger, cfg, agglayerClient, l1InfoTreeSync, l2Syncer, epochNotifier) -} - func createAggregator(ctx context.Context, c config.Config, runMigrations bool) *aggregator.Aggregator { logger := log.WithFields("module", cdkcommon.AGGREGATOR) // Migrations @@ -325,75 +236,6 @@ func newTxBuilder( return txBuilder, err } -func createAggoracle( - cfg config.Config, - l1Client, - l2Client *ethclient.Client, - syncer *l1infotreesync.L1InfoTreeSync, -) *aggoracle.AggOracle { - logger := log.WithFields("module", cdkcommon.AGGORACLE) - ethermanClient, err := etherman.NewClient(cfg.Etherman, cfg.NetworkConfig.L1Config, cfg.Common) - if err != nil { - logger.Fatal(err) - } - l2ChainID, err := ethermanClient.GetL2ChainID() - if err != nil { - logger.Errorf("Failed to retrieve L2ChainID: %v", err) - } - - // sanity check for the aggOracle ChainID - if cfg.AggOracle.EVMSender.EthTxManager.Etherman.L1ChainID != l2ChainID { - logger.Warnf("Incorrect ChainID in aggOracle provided: %d expected: %d", - cfg.AggOracle.EVMSender.EthTxManager.Etherman.L1ChainID, - l2ChainID, - ) - } - - var sender aggoracle.ChainSender - switch cfg.AggOracle.TargetChainType { - case aggoracle.EVMChain: - cfg.AggOracle.EVMSender.EthTxManager.Log = ethtxlog.Config{ - Environment: ethtxlog.LogEnvironment(cfg.Log.Environment), - Level: cfg.Log.Level, - Outputs: cfg.Log.Outputs, - } - ethTxManager, err := ethtxmanager.New(cfg.AggOracle.EVMSender.EthTxManager) - if err != nil { - log.Fatal(err) - } - go ethTxManager.Start() - sender, err = chaingersender.NewEVMChainGERSender( - logger, - cfg.AggOracle.EVMSender.GlobalExitRootL2Addr, - l2Client, - 
ethTxManager, - cfg.AggOracle.EVMSender.GasOffset, - cfg.AggOracle.EVMSender.WaitPeriodMonitorTx.Duration, - ) - if err != nil { - log.Fatal(err) - } - default: - log.Fatalf( - "Unsupported chaintype %s. Supported values: %v", - cfg.AggOracle.TargetChainType, aggoracle.SupportedChainTypes, - ) - } - aggOracle, err := aggoracle.New( - logger, - sender, - l1Client, - syncer, - etherman.BlockNumberFinality(cfg.AggOracle.BlockFinality), - cfg.AggOracle.WaitPeriodNextGER.Duration, - ) - if err != nil { - logger.Fatal(err) - } - - return aggOracle -} - func newDataAvailability(c config.Config, etherman *etherman.Client) (*dataavailability.DataAvailability, error) { if !c.Common.IsValidiumMode { return nil, nil @@ -526,8 +368,7 @@ func runL1InfoTreeSyncerIfNeeded( l1Client *ethclient.Client, reorgDetector *reorgdetector.ReorgDetector, ) *l1infotreesync.L1InfoTreeSync { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.BRIDGE, - cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGSENDER, cdkcommon.L1INFOTREESYNC}, components) { + if !isNeeded([]string{cdkcommon.SEQUENCE_SENDER, cdkcommon.L1INFOTREESYNC}, components) { return nil } l1InfoTreeSync, err := l1infotreesync.New( @@ -536,7 +377,7 @@ func runL1InfoTreeSyncerIfNeeded( cfg.L1InfoTreeSync.GlobalExitRootAddr, cfg.L1InfoTreeSync.RollupManagerAddr, cfg.L1InfoTreeSync.SyncBlockChunkSize, - etherman.BlockNumberFinality(cfg.L1InfoTreeSync.BlockFinality), + aggkitetherman.NewBlockNumberFinality(cfg.L1InfoTreeSync.BlockFinality), reorgDetector, l1Client, cfg.L1InfoTreeSync.WaitForNewBlocksPeriod.Duration, @@ -544,7 +385,7 @@ func runL1InfoTreeSyncerIfNeeded( cfg.L1InfoTreeSync.RetryAfterErrorPeriod.Duration, cfg.L1InfoTreeSync.MaxRetryAttemptsAfterError, l1infotreesync.FlagNone, - etherman.FinalizedBlock, + aggkitetherman.FinalizedBlock, ) if err != nil { log.Fatal(err) @@ -556,10 +397,7 @@ func runL1InfoTreeSyncerIfNeeded( func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client { if !isNeeded([]string{ - 
cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.BRIDGE, - cdkcommon.AGGSENDER, - cdkcommon.L1INFOTREESYNC, + cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.L1INFOTREESYNC, }, components) { return nil } @@ -572,13 +410,8 @@ func runL1ClientIfNeeded(components []string, urlRPCL1 string) *ethclient.Client return l1CLient } -func getRollUpIDIfNeeded(components []string, networkConfig ethermanconfig.L1Config, +func getRollupID(networkConfig ethermanconfig.L1Config, l1Client *ethclient.Client) uint32 { - if !isNeeded([]string{ - cdkcommon.AGGSENDER, - }, components) { - return 0 - } rollupID, err := etherman.GetRollupID(networkConfig, networkConfig.ZkEVMAddr, l1Client) if err != nil { log.Fatal(err) @@ -586,30 +419,13 @@ func getRollUpIDIfNeeded(components []string, networkConfig ethermanconfig.L1Con return rollupID } -func runL2ClientIfNeeded(components []string, urlRPCL2 string) *ethclient.Client { - if !isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { - return nil - } - - log.Infof("dialing L2 client at: %s", urlRPCL2) - l2CLient, err := ethclient.Dial(urlRPCL2) - if err != nil { - log.Fatal(err) - } - - return l2CLient -} - func runReorgDetectorL1IfNeeded( ctx context.Context, components []string, l1Client *ethclient.Client, cfg *reorgdetector.Config, ) (*reorgdetector.ReorgDetector, chan error) { - if !isNeeded([]string{ - cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, - cdkcommon.AGGORACLE, cdkcommon.BRIDGE, cdkcommon.AGGSENDER, - cdkcommon.L1INFOTREESYNC}, + if !isNeeded([]string{cdkcommon.SEQUENCE_SENDER, cdkcommon.AGGREGATOR, cdkcommon.L1INFOTREESYNC}, components) { return nil, nil } @@ -626,210 +442,15 @@ func runReorgDetectorL1IfNeeded( return rd, errChan } -func runReorgDetectorL2IfNeeded( - ctx context.Context, - components []string, - l2Client *ethclient.Client, - cfg *reorgdetector.Config, -) (*reorgdetector.ReorgDetector, chan error) { - if 
!isNeeded([]string{cdkcommon.AGGORACLE, cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { - return nil, nil - } - rd := newReorgDetector(cfg, l2Client, reorgdetector.L2) - - errChan := make(chan error) - go func() { - if err := rd.Start(ctx); err != nil { - errChan <- err - } - close(errChan) - }() - - return rd, errChan -} - -func runClaimSponsorIfNeeded( - ctx context.Context, - components []string, - l2Client *ethclient.Client, - cfg claimsponsor.EVMClaimSponsorConfig, -) *claimsponsor.ClaimSponsor { - if !isNeeded([]string{cdkcommon.BRIDGE}, components) || !cfg.Enabled { - return nil - } - - logger := log.WithFields("module", cdkcommon.CLAIM_SPONSOR) - // In the future there may support different backends other than EVM, and this will require different config. - // But today only EVM is supported - ethTxManagerL2, err := ethtxmanager.New(cfg.EthTxManager) - if err != nil { - logger.Fatal(err) - } - go ethTxManagerL2.Start() - cs, err := claimsponsor.NewEVMClaimSponsor( - logger, - cfg.DBPath, - l2Client, - cfg.BridgeAddrL2, - cfg.SenderAddr, - cfg.MaxGas, - cfg.GasOffset, - ethTxManagerL2, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - cfg.WaitTxToBeMinedPeriod.Duration, - cfg.WaitTxToBeMinedPeriod.Duration, - ) - if err != nil { - logger.Fatalf("error creating claim sponsor: %s", err) - } - go cs.Start(ctx) - - return cs -} - -func runLastGERSyncIfNeeded( - ctx context.Context, - components []string, - cfg lastgersync.Config, - reorgDetectorL2 *reorgdetector.ReorgDetector, - l2Client *ethclient.Client, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, -) *lastgersync.LastGERSync { - if !isNeeded([]string{cdkcommon.BRIDGE}, components) { - return nil - } - lastGERSync, err := lastgersync.New( - ctx, - cfg.DBPath, - reorgDetectorL2, - l2Client, - cfg.GlobalExitRootL2Addr, - l1InfoTreeSync, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - etherman.BlockNumberFinality(cfg.BlockFinality), - 
cfg.WaitForNewBlocksPeriod.Duration, - cfg.DownloadBufferSize, - ) - if err != nil { - log.Fatalf("error creating lastGERSync: %s", err) - } - go lastGERSync.Start(ctx) - - return lastGERSync -} - -func runBridgeSyncL1IfNeeded( - ctx context.Context, - components []string, - cfg bridgesync.Config, - reorgDetectorL1 *reorgdetector.ReorgDetector, - l1Client *ethclient.Client, - rollupID uint32, -) *bridgesync.BridgeSync { - if !isNeeded([]string{cdkcommon.BRIDGE}, components) { - return nil - } - - bridgeSyncL1, err := bridgesync.NewL1( - ctx, - cfg.DBPath, - cfg.BridgeAddr, - cfg.SyncBlockChunkSize, - etherman.BlockNumberFinality(cfg.BlockFinality), - reorgDetectorL1, - l1Client, - cfg.InitialBlockNum, - cfg.WaitForNewBlocksPeriod.Duration, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - rollupID, - false, - etherman.FinalizedBlock, - ) - if err != nil { - log.Fatalf("error creating bridgeSyncL1: %s", err) - } - go bridgeSyncL1.Start(ctx) - - return bridgeSyncL1 -} - -func runBridgeSyncL2IfNeeded( - ctx context.Context, - components []string, - cfg bridgesync.Config, - reorgDetectorL2 *reorgdetector.ReorgDetector, - l2Client *ethclient.Client, - rollupID uint32, -) *bridgesync.BridgeSync { - if !isNeeded([]string{cdkcommon.BRIDGE, cdkcommon.AGGSENDER}, components) { - return nil - } - - bridgeSyncL2, err := bridgesync.NewL2( - ctx, - cfg.DBPath, - cfg.BridgeAddr, - cfg.SyncBlockChunkSize, - etherman.BlockNumberFinality(cfg.BlockFinality), - reorgDetectorL2, - l2Client, - cfg.InitialBlockNum, - cfg.WaitForNewBlocksPeriod.Duration, - cfg.RetryAfterErrorPeriod.Duration, - cfg.MaxRetryAttemptsAfterError, - rollupID, - true, - etherman.LatestBlock, - ) - if err != nil { - log.Fatalf("error creating bridgeSyncL2: %s", err) - } - go bridgeSyncL2.Start(ctx) - - return bridgeSyncL2 -} - -func createBridgeRPC( - cfg jRPC.Config, - cdkNetworkID uint32, - sponsor *claimsponsor.ClaimSponsor, - l1InfoTree *l1infotreesync.L1InfoTreeSync, - injectedGERs 
*lastgersync.LastGERSync, - bridgeL1 *bridgesync.BridgeSync, - bridgeL2 *bridgesync.BridgeSync, -) []jRPC.Service { - logger := log.WithFields("module", cdkcommon.BRIDGE) - services := []jRPC.Service{ - { - Name: rpc.BRIDGE, - Service: rpc.NewBridgeEndpoints( - logger, - cfg.WriteTimeout.Duration, - cfg.ReadTimeout.Duration, - cdkNetworkID, - sponsor, - l1InfoTree, - injectedGERs, - bridgeL1, - bridgeL2, - ), - }, - } - return services -} - func createRPC(cfg jRPC.Config, services []jRPC.Service) *jRPC.Server { logger := log.WithFields("module", "RPC") return jRPC.NewServer(cfg, services, jRPC.WithLogger(logger.GetSugaredLogger())) } func getL2RPCUrl(c *config.Config) string { - if c.AggSender.URLRPCL2 != "" { - return c.AggSender.URLRPCL2 + if c.SequenceSender.RPCURL != "" { + return c.SequenceSender.RPCURL } - return c.AggOracle.EVMSender.URLRPCL2 + return "" } diff --git a/common/common.go b/common/common.go index 2fe71dce2..9fddbc8f9 100644 --- a/common/common.go +++ b/common/common.go @@ -8,7 +8,7 @@ import ( "path/filepath" "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/iden3/go-iden3-crypto/keccak256" diff --git a/common/components.go b/common/components.go index 2b562cff7..f60c70c42 100644 --- a/common/components.go +++ b/common/components.go @@ -5,16 +5,10 @@ const ( SEQUENCE_SENDER = "sequence-sender" //nolint:stylecheck // AGGREGATOR name to identify the aggregator component AGGREGATOR = "aggregator" - // AGGORACLE name to identify the aggoracle component - AGGORACLE = "aggoracle" - // BRIDGE name to identify the bridge component (have RPC) - BRIDGE = "bridge" // CLAIM_SPONSOR name to identify the claim sponsor component CLAIM_SPONSOR = "claim-sponsor" //nolint:stylecheck // PROVER name to identify the prover component PROVER = "prover" - // AGGSENDER name to identify the aggsender component 
- AGGSENDER = "aggsender" // L1INFOTREESYNC name to identify the l1infotreesync component L1INFOTREESYNC = "l1infotreesync" ) diff --git a/config/config.go b/config/config.go index 9363b93b7..8df52df6b 100644 --- a/config/config.go +++ b/config/config.go @@ -8,18 +8,13 @@ import ( "strings" jRPC "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/aggoracle" "github.com/0xPolygon/cdk/aggregator" - "github.com/0xPolygon/cdk/aggsender" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/claimsponsor" "github.com/0xPolygon/cdk/common" ethermanconfig "github.com/0xPolygon/cdk/etherman/config" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/sequencesender" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" + "github.com/agglayer/aggkit/reorgdetector" "github.com/mitchellh/mapstructure" "github.com/pelletier/go-toml/v2" "github.com/spf13/viper" @@ -128,43 +123,30 @@ The file is [TOML format] type Config struct { // Configuration of the etherman (client for access L1) Etherman ethermanconfig.Config + // Configuration of the aggregator Aggregator aggregator.Config + // Configure Log level for all the services, allow also to store the logs in a file Log log.Config + // Configuration of the genesis of the network. 
This is used to known the initial state of the network NetworkConfig NetworkConfig + // Configuration of the sequence sender service SequenceSender sequencesender.Config + // Common Config that affects all the services Common common.Config + // Configuration of the reorg detector service to be used for the L1 ReorgDetectorL1 reorgdetector.Config - // Configuration of the reorg detector service to be used for the L2 - ReorgDetectorL2 reorgdetector.Config - // Configuration of the aggOracle service - AggOracle aggoracle.Config + // Configuration of the L1 Info Treee Sync service L1InfoTreeSync l1infotreesync.Config // RPC is the config for the RPC server RPC jRPC.Config - - // ClaimSponsor is the config for the claim sponsor - ClaimSponsor claimsponsor.EVMClaimSponsorConfig - - // BridgeL1Sync is the configuration for the synchronizer of the bridge of the L1 - BridgeL1Sync bridgesync.Config - - // BridgeL2Sync is the configuration for the synchronizer of the bridge of the L2 - BridgeL2Sync bridgesync.Config - - // LastGERSync is the config for the synchronizer in charge of syncing the last GER injected on L2. 
- // Needed for the bridge service (RPC) - LastGERSync lastgersync.Config - - // AggSender is the configuration of the agg sender service - AggSender aggsender.Config } // Load loads the configuration diff --git a/config/config_render.go b/config/config_render.go index ac251d300..cecdef766 100644 --- a/config/config_render.go +++ b/config/config_render.go @@ -7,7 +7,7 @@ import ( "regexp" "strings" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/knadh/koanf/parsers/json" "github.com/knadh/koanf/parsers/toml" "github.com/knadh/koanf/providers/rawbytes" diff --git a/config/default.go b/config/default.go index 9fc21c28d..a51c5e5b0 100644 --- a/config/default.go +++ b/config/default.go @@ -202,9 +202,6 @@ DBPath = "{{PathRWData}}/aggregator_db.sqlite" [ReorgDetectorL1] DBPath = "{{PathRWData}}/reorgdetectorl1.sqlite" -[ReorgDetectorL2] -DBPath = "{{PathRWData}}/reorgdetectorl2.sqlite" - [L1InfoTreeSync] DBPath = "{{PathRWData}}/L1InfoTreeSync.sqlite" GlobalExitRootAddr="{{NetworkConfig.L1.GlobalExitRootManagerAddr}}" @@ -217,39 +214,6 @@ InitialBlock={{genesisBlockNumber}} RetryAfterErrorPeriod="1s" MaxRetryAttemptsAfterError=-1 -[AggOracle] -TargetChainType="EVM" -URLRPCL1="{{L1URL}}" -BlockFinality="FinalizedBlock" -WaitPeriodNextGER="100ms" - [AggOracle.EVMSender] - GlobalExitRootL2="{{L2Config.GlobalExitRootAddr}}" - URLRPCL2="{{L2URL}}" - GasOffset=0 - WaitPeriodMonitorTx="100ms" - [AggOracle.EVMSender.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2s" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/app/keystore/aggoracle.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "{{PathRWData}}/ethtxmanager-sequencesender.sqlite" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 5 - FinalizedStatusL1NumberOfBlocks = 10 - [AggOracle.EVMSender.EthTxManager.Etherman] - URL = "{{L2URL}}" - 
MultiGasProvider = false - # L1ChainID = 0 indicates it will be set at runtime - # This field should be populated with L2ChainID - L1ChainID = 0 - HTTPHeaders = [] - [RPC] Host = "0.0.0.0" Port = 5576 @@ -257,68 +221,6 @@ ReadTimeout = "2s" WriteTimeout = "2s" MaxRequestsPerIPAndSecond = 10 -[ClaimSponsor] -DBPath = "{{PathRWData}}/claimsopnsor.sqlite" -Enabled = true -SenderAddr = "0xfa3b44587990f97ba8b6ba7e230a5f0e95d14b3d" -BridgeAddrL2 = "0xB7098a13a48EcE087d3DA15b2D28eCE0f89819B8" -MaxGas = 200000 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitTxToBeMinedPeriod = "3s" -WaitOnEmptyQueue = "3s" -GasOffset = 0 - [ClaimSponsor.EthTxManager] - FrequencyToMonitorTxs = "1s" - WaitTxToBeMined = "2s" - GetReceiptMaxTime = "250ms" - GetReceiptWaitInterval = "1s" - PrivateKeys = [ - {Path = "/app/keystore/claimsopnsor.keystore", Password = "testonly"}, - ] - ForcedGas = 0 - GasPriceMarginFactor = 1 - MaxGasPriceLimit = 0 - StoragePath = "{{PathRWData}}/ethtxmanager-claimsponsor.sqlite" - ReadPendingL1Txs = false - SafeStatusL1NumberOfBlocks = 5 - FinalizedStatusL1NumberOfBlocks = 10 - [ClaimSponsor.EthTxManager.Etherman] - URL = "{{L2URL}}" - MultiGasProvider = false - L1ChainID = {{NetworkConfig.L1.L1ChainID}} - HTTPHeaders = [] - -[BridgeL1Sync] -DBPath = "{{PathRWData}}/bridgel1sync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -BridgeAddr = "{{polygonBridgeAddr}}" -SyncBlockChunkSize = 100 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "3s" - -[BridgeL2Sync] -DBPath = "{{PathRWData}}/bridgel2sync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -BridgeAddr = "{{polygonBridgeAddr}}" -SyncBlockChunkSize = 100 -RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "3s" - -[LastGERSync] -DBPath = "{{PathRWData}}/lastgersync.sqlite" -BlockFinality = "LatestBlock" -InitialBlockNum = 0 -GlobalExitRootL2Addr = "{{L2Config.GlobalExitRootAddr}}" 
-RetryAfterErrorPeriod = "1s" -MaxRetryAttemptsAfterError = -1 -WaitForNewBlocksPeriod = "1s" -DownloadBufferSize = 100 - [NetworkConfig.L1] L1ChainID = {{L1Config.chainId}} PolAddr = "{{L1Config.polTokenAddress}}" @@ -326,21 +228,4 @@ ZkEVMAddr = "{{L1Config.polygonZkEVMAddress}}" RollupManagerAddr = "{{L1Config.polygonRollupManagerAddress}}" GlobalExitRootManagerAddr = "{{L1Config.polygonZkEVMGlobalExitRootAddress}}" - -[AggSender] -StoragePath = "{{PathRWData}}/aggsender.sqlite" -AggLayerURL = "{{AggLayerURL}}" -AggsenderPrivateKey = {Path = "{{SequencerPrivateKeyPath}}", Password = "{{SequencerPrivateKeyPassword}}"} -URLRPCL2="{{L2URL}}" -BlockFinality = "LatestBlock" -EpochNotificationPercentage = 50 -SaveCertificatesToFilesPath = "" -MaxRetriesStoreCertificate = 3 -DelayBeetweenRetries = "60s" -KeepCertificatesHistory = true -# MaxSize of the certificate to 8Mb -MaxCertSize = 8388608 -BridgeMetadataAsHash = true -DryRun = false -EnableRPC = true ` diff --git a/crates/cdk/build.rs b/crates/cdk/build.rs index e9708d27a..7948427cb 100644 --- a/crates/cdk/build.rs +++ b/crates/cdk/build.rs @@ -7,63 +7,51 @@ use std::path::Path; use std::path::PathBuf; use std::process::Command; use serde_json::Value; - fn main() { let _ = build_versions(); - let build_script_disabled = env::var("BUILD_SCRIPT_DISABLED") .map(|v| v == "1") .unwrap_or(false); // run by default - if build_script_disabled { println!("cargo:warning=Build script is disabled. 
Skipping build."); return; } - // Determine the directory where the build script is located let dir = env::var("CARGO_MANIFEST_DIR").unwrap(); let build_path = PathBuf::from(dir + "/../.."); println!("cargo:rerun-if-changed=*.go"); - // Optionally, specify the directory where your Makefile is located // For this example, it's assumed to be the same as the build script's directory // If your Makefile is in a different directory, adjust `build_path` accordingly - // Call the make command let output = Command::new("make") .arg("build-go") // Create a new make command .current_dir(build_path) // Set the current directory for the command .output() // Execute the command and capture the output .expect("Failed to execute make command"); - // Check the output and react accordingly if !output.status.success() { // If the make command failed, print the error and exit let error_message = String::from_utf8_lossy(&output.stderr); panic!("Make command failed with error: {}", error_message); } - // Optionally, print the output of the make command println!( "Make command output: {}", String::from_utf8_lossy(&output.stdout) ); - // Here you can also add additional commands to inform Cargo about // how to rerun the build script. For example, to rerun this script // only when a specific file changes: // println!("cargo:rerun-if-changed=path/to/file"); } - // build_versions retrieves the versions from the Starlark file and embeds them in the binary. 
fn build_versions() -> io::Result<()> { // URL of the Starlark file let url = "https://raw.githubusercontent.com/0xPolygon/kurtosis-cdk/refs/heads/main/input_parser.star"; - // Download the file content let response = get(url).expect("Failed to send request"); let content = response.text().expect("Failed to read response text"); - // Extract the relevant lines (skip the first 30 lines, take the next 15) let raw_versions = content .lines() @@ -71,24 +59,19 @@ fn build_versions() -> io::Result<()> { .take(15) .collect::>() .join("\n"); - // Remove the declaration `DEFAULT_IMAGES = ` let raw_versions = raw_versions.replace("DEFAULT_IMAGES = ", ""); - // Clean up the content by removing comments and unnecessary spaces let re_comments = Regex::new(r"#.*$").unwrap(); // Regex to remove comments let re_trailing_commas = Regex::new(r",(\s*})").unwrap(); // Regex to fix trailing commas - let cleaned_versions = raw_versions .lines() .map(|line| re_comments.replace_all(line, "").trim().to_string()) // Remove comments and trim spaces .filter(|line| !line.is_empty()) // Filter out empty lines .collect::>() .join("\n"); - // Fix improperly placed trailing commas let cleaned_versions = re_trailing_commas.replace_all(&cleaned_versions, "$1"); - // Attempt to parse the cleaned content as JSON let versions_json: Value = match serde_json::from_str(&cleaned_versions) { Ok(json) => json, @@ -98,7 +81,6 @@ fn build_versions() -> io::Result<()> { return Err(io::Error::new(io::ErrorKind::InvalidData, "JSON parsing failed")); } }; - // Define the output file path for the JSON let dest_path = Path::new(".").join("versions.json"); let mut file = File::create(&dest_path)?; @@ -111,4 +93,4 @@ fn build_versions() -> io::Result<()> { )?; Ok(()) -} +} \ No newline at end of file diff --git a/dataavailability/datacommittee/datacommittee.go b/dataavailability/datacommittee/datacommittee.go index 369fc0fe6..6921a5296 100644 --- a/dataavailability/datacommittee/datacommittee.go +++ 
b/dataavailability/datacommittee/datacommittee.go @@ -13,8 +13,8 @@ import ( "github.com/0xPolygon/cdk-data-availability/client" daTypes "github.com/0xPolygon/cdk-data-availability/types" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygonHermez/zkevm-synchronizer-l1/translator" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" diff --git a/dataavailability/datacommittee/datacommittee_test.go b/dataavailability/datacommittee/datacommittee_test.go index ac138b449..b4ebd186d 100644 --- a/dataavailability/datacommittee/datacommittee_test.go +++ b/dataavailability/datacommittee/datacommittee_test.go @@ -6,9 +6,9 @@ import ( "testing" "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygondatacommittee" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/erc1967proxy" "github.com/0xPolygon/cdk/test/helpers" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" diff --git a/dataavailability/mocks_da/batch_data_provider.go b/dataavailability/mocks_da/mock_batch_data_provider.go similarity index 98% rename from dataavailability/mocks_da/batch_data_provider.go rename to dataavailability/mocks_da/mock_batch_data_provider.go index 36e782ace..2529c859f 100644 --- a/dataavailability/mocks_da/batch_data_provider.go +++ b/dataavailability/mocks_da/mock_batch_data_provider.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_da diff --git a/dataavailability/mocks_da/da_backender.go b/dataavailability/mocks_da/mock_da_backender.go similarity index 99% rename from dataavailability/mocks_da/da_backender.go rename to dataavailability/mocks_da/mock_da_backender.go index d7ae9a050..f9f1187c1 100644 --- a/dataavailability/mocks_da/da_backender.go +++ b/dataavailability/mocks_da/mock_da_backender.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_da diff --git a/dataavailability/mocks_da/data_manager.go b/dataavailability/mocks_da/mock_data_manager.go similarity index 99% rename from dataavailability/mocks_da/data_manager.go rename to dataavailability/mocks_da/mock_data_manager.go index 34345d715..697f8539f 100644 --- a/dataavailability/mocks_da/data_manager.go +++ b/dataavailability/mocks_da/mock_data_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_da diff --git a/dataavailability/mocks_da/sequence_retriever.go b/dataavailability/mocks_da/mock_sequence_retriever.go similarity index 98% rename from dataavailability/mocks_da/sequence_retriever.go rename to dataavailability/mocks_da/mock_sequence_retriever.go index f82d9a70d..934376e87 100644 --- a/dataavailability/mocks_da/sequence_retriever.go +++ b/dataavailability/mocks_da/mock_sequence_retriever.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_da diff --git a/dataavailability/mocks_da/sequence_sender.go b/dataavailability/mocks_da/mock_sequence_sender.go similarity index 98% rename from dataavailability/mocks_da/sequence_sender.go rename to dataavailability/mocks_da/mock_sequence_sender.go index f1e447413..a7fc2638b 100644 --- a/dataavailability/mocks_da/sequence_sender.go +++ b/dataavailability/mocks_da/mock_sequence_sender.go @@ -1,4 +1,4 @@ -// Code generated by mockery. 
DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_da diff --git a/dataavailability/mocks_da/sequence_sender_banana.go b/dataavailability/mocks_da/mock_sequence_sender_banana.go similarity index 98% rename from dataavailability/mocks_da/sequence_sender_banana.go rename to dataavailability/mocks_da/mock_sequence_sender_banana.go index aca7b1a33..9549518ad 100644 --- a/dataavailability/mocks_da/sequence_sender_banana.go +++ b/dataavailability/mocks_da/mock_sequence_sender_banana.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_da diff --git a/dataavailability/mocks_da/sequence_sender_elderberry.go b/dataavailability/mocks_da/mock_sequence_sender_elderberry.go similarity index 98% rename from dataavailability/mocks_da/sequence_sender_elderberry.go rename to dataavailability/mocks_da/mock_sequence_sender_elderberry.go index 3816fa1b9..b37b703e6 100644 --- a/dataavailability/mocks_da/sequence_sender_elderberry.go +++ b/dataavailability/mocks_da/mock_sequence_sender_elderberry.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_da diff --git a/db/interface.go b/db/interface.go deleted file mode 100644 index 03f81aba0..000000000 --- a/db/interface.go +++ /dev/null @@ -1,17 +0,0 @@ -package db - -import ( - "context" - "database/sql" -) - -type Querier interface { - Exec(query string, args ...interface{}) (sql.Result, error) - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryRow(query string, args ...interface{}) *sql.Row -} - -type DBer interface { - Querier - BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) -} diff --git a/db/meddler.go b/db/meddler.go deleted file mode 100644 index 83df3b8ae..000000000 --- a/db/meddler.go +++ /dev/null @@ -1,236 +0,0 @@ -package db - -import ( - "errors" - "fmt" - "math/big" - "reflect" - "strings" - - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - sqlite "github.com/mattn/go-sqlite3" - "github.com/russross/meddler" -) - -// init registers tags to be used to read/write from SQL DBs using meddler -func init() { - meddler.Default = meddler.SQLite - meddler.Register("bigint", BigIntMeddler{}) - meddler.Register("merkleproof", MerkleProofMeddler{}) - meddler.Register("hash", HashMeddler{}) - meddler.Register("address", AddressMeddler{}) -} - -func SQLiteErr(err error) (*sqlite.Error, bool) { - sqliteErr := &sqlite.Error{} - if ok := errors.As(err, sqliteErr); ok { - return sqliteErr, true - } - if driverErr, ok := meddler.DriverErr(err); ok { - return sqliteErr, errors.As(driverErr, sqliteErr) - } - return sqliteErr, false -} - -// SliceToSlicePtrs converts any []Foo to []*Foo -func SliceToSlicePtrs(slice interface{}) interface{} { - v := reflect.ValueOf(slice) - vLen := v.Len() - typ := v.Type().Elem() - res := reflect.MakeSlice(reflect.SliceOf(reflect.PointerTo(typ)), vLen, vLen) - for i := 0; i < vLen; i++ { - res.Index(i).Set(v.Index(i).Addr()) - } - return res.Interface() -} - -// SlicePtrsToSlice converts any []*Foo to []Foo -func SlicePtrsToSlice(slice 
interface{}) interface{} { - v := reflect.ValueOf(slice) - vLen := v.Len() - typ := v.Type().Elem().Elem() - res := reflect.MakeSlice(reflect.SliceOf(typ), vLen, vLen) - for i := 0; i < vLen; i++ { - res.Index(i).Set(v.Index(i).Elem()) - } - return res.Interface() -} - -// BigIntMeddler encodes or decodes the field value to or from string -type BigIntMeddler struct{} - -// PreRead is called before a Scan operation for fields that have the BigIntMeddler -func (b BigIntMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return new(string), nil -} - -// PostRead is called after a Scan operation for fields that have the BigIntMeddler -func (b BigIntMeddler) PostRead(fieldPtr, scanTarget interface{}) error { - ptr, ok := scanTarget.(*string) - if !ok { - return errors.New("scanTarget is not *string") - } - if ptr == nil { - return fmt.Errorf("BigIntMeddler.PostRead: nil pointer") - } - field, ok := fieldPtr.(**big.Int) - if !ok { - return errors.New("fieldPtr is not *big.Int") - } - decimal := 10 - *field, ok = new(big.Int).SetString(*ptr, decimal) - if !ok { - return fmt.Errorf("big.Int.SetString failed on \"%v\"", *ptr) - } - return nil -} - -// PreWrite is called before an Insert or Update operation for fields that have the BigIntMeddler -func (b BigIntMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { - field, ok := fieldPtr.(*big.Int) - if !ok { - return nil, errors.New("fieldPtr is not *big.Int") - } - - return field.String(), nil -} - -// MerkleProofMeddler encodes or decodes the field value to or from string -type MerkleProofMeddler struct{} - -// PreRead is called before a Scan operation for fields that have the MerkleProofMeddler -func (b MerkleProofMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return new(string), nil -} - -// PostRead is called after a Scan 
operation for fields that have the MerkleProofMeddler -func (b MerkleProofMeddler) PostRead(fieldPtr, scanTarget interface{}) error { - ptr, ok := scanTarget.(*string) - if !ok { - return errors.New("scanTarget is not *string") - } - if ptr == nil { - return errors.New("ProofMeddler.PostRead: nil pointer") - } - field, ok := fieldPtr.(*tree.Proof) - if !ok { - return errors.New("fieldPtr is not tree.Proof") - } - strHashes := strings.Split(*ptr, ",") - if len(strHashes) != int(tree.DefaultHeight) { - return fmt.Errorf("unexpected len of hashes: expected %d actual %d", tree.DefaultHeight, len(strHashes)) - } - for i, strHash := range strHashes { - field[i] = common.HexToHash(strHash) - } - return nil -} - -// PreWrite is called before an Insert or Update operation for fields that have the MerkleProofMeddler -func (b MerkleProofMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { - field, ok := fieldPtr.(tree.Proof) - if !ok { - return nil, errors.New("fieldPtr is not tree.Proof") - } - var s string - for _, f := range field { - s += f.Hex() + "," - } - s = strings.TrimSuffix(s, ",") - return s, nil -} - -// HashMeddler encodes or decodes the field value to or from string -type HashMeddler struct{} - -// PreRead is called before a Scan operation for fields that have the HashMeddler -func (b HashMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return new(string), nil -} - -// PostRead is called after a Scan operation for fields that have the HashMeddler -func (b HashMeddler) PostRead(fieldPtr, scanTarget interface{}) error { - rawHashPtr, ok := scanTarget.(*string) - if !ok { - return errors.New("scanTarget is not *string") - } - - // Handle the case where fieldPtr is a *common.Hash - field, ok := fieldPtr.(*common.Hash) - if ok { - *field = common.HexToHash(*rawHashPtr) - return nil - } - - // Handle the case where fieldPtr is a **common.Hash (nullable 
field) - hashPtr, ok := fieldPtr.(**common.Hash) - if ok { - // If the string is empty, set the hash to nil - if len(*rawHashPtr) == 0 { - *hashPtr = nil - // Otherwise, convert the string to a common.Hash and assign it - } else { - tmp := common.HexToHash(*rawHashPtr) - *hashPtr = &tmp - } - return nil - } - - // If fieldPtr is neither a *common.Hash nor a **common.Hash, return an error - return errors.New("fieldPtr is not *common.Hash or **common.Hash") -} - -// PreWrite is called before an Insert or Update operation for fields that have the HashMeddler -func (b HashMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { - field, ok := fieldPtr.(common.Hash) - if !ok { - hashPtr, ok := fieldPtr.(*common.Hash) - if !ok { - return nil, errors.New("fieldPtr is not common.Hash") - } - if hashPtr == nil { - return []byte{}, nil - } - return hashPtr.Hex(), nil - } - return field.Hex(), nil -} - -// AddressMeddler encodes or decodes the field value to or from string -type AddressMeddler struct{} - -// PreRead is called before a Scan operation for fields that have the AddressMeddler -func (b AddressMeddler) PreRead(fieldAddr interface{}) (scanTarget interface{}, err error) { - // give a pointer to a byte buffer to grab the raw data - return new(string), nil -} - -// PostRead is called after a Scan operation for fields that have the AddressMeddler -func (b AddressMeddler) PostRead(fieldPtr, scanTarget interface{}) error { - ptr, ok := scanTarget.(*string) - if !ok { - return errors.New("scanTarget is not *string") - } - if ptr == nil { - return errors.New("AddressMeddler.PostRead: nil pointer") - } - field, ok := fieldPtr.(*common.Address) - if !ok { - return errors.New("fieldPtr is not common.Address") - } - *field = common.HexToAddress(*ptr) - return nil -} - -// PreWrite is called before an Insert or Update operation for fields that have the AddressMeddler -func (b AddressMeddler) PreWrite(fieldPtr interface{}) (saveValue interface{}, err error) { 
- field, ok := fieldPtr.(common.Address) - if !ok { - return nil, errors.New("fieldPtr is not common.Address") - } - return field.Hex(), nil -} diff --git a/db/meddler_test.go b/db/meddler_test.go deleted file mode 100644 index 61017bbd9..000000000 --- a/db/meddler_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package db - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" -) - -func TestHashMeddler_PreWrite(t *testing.T) { - t.Parallel() - - hex := "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - hash := common.HexToHash(hex) - - tests := []struct { - name string - fieldPtr interface{} - wantValue interface{} - wantErr bool - }{ - { - name: "Valid common.Hash", - fieldPtr: hash, - wantValue: hex, - wantErr: false, - }, - { - name: "Valid *common.Hash", - fieldPtr: &hash, - wantValue: hex, - wantErr: false, - }, - { - name: "Nil *common.Hash", - fieldPtr: (*common.Hash)(nil), - wantValue: []byte{}, - wantErr: false, - }, - { - name: "Invalid type", - fieldPtr: "invalid", - wantValue: nil, - wantErr: true, - }, - } - - for _, tt := range tests { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - h := HashMeddler{} - gotValue, err := h.PreWrite(tt.fieldPtr) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.wantValue, gotValue) - } - }) - } -} diff --git a/db/migrations.go b/db/migrations.go deleted file mode 100644 index 8af35874e..000000000 --- a/db/migrations.go +++ /dev/null @@ -1,53 +0,0 @@ -package db - -import ( - "database/sql" - "fmt" - "strings" - - "github.com/0xPolygon/cdk/db/types" - "github.com/0xPolygon/cdk/log" - _ "github.com/mattn/go-sqlite3" - migrate "github.com/rubenv/sql-migrate" -) - -const ( - upDownSeparator = "-- +migrate Up" - dbPrefixReplacer = "/*dbprefix*/" -) - -// RunMigrations will execute pending migrations if needed to keep -// the database updated with the latest changes in either direction, -// 
up or down. -func RunMigrations(dbPath string, migrations []types.Migration) error { - db, err := NewSQLiteDB(dbPath) - if err != nil { - return fmt.Errorf("error creating DB %w", err) - } - return RunMigrationsDB(log.GetDefaultLogger(), db, migrations) -} - -func RunMigrationsDB(logger *log.Logger, db *sql.DB, migrations []types.Migration) error { - migs := &migrate.MemoryMigrationSource{Migrations: []*migrate.Migration{}} - for _, m := range migrations { - prefixed := strings.ReplaceAll(m.SQL, dbPrefixReplacer, m.Prefix) - splitted := strings.Split(prefixed, upDownSeparator) - migs.Migrations = append(migs.Migrations, &migrate.Migration{ - Id: m.Prefix + m.ID, - Up: []string{splitted[1]}, - Down: []string{splitted[0]}, - }) - } - - logger.Debugf("running migrations:") - for _, m := range migs.Migrations { - logger.Debugf("%+v", m.Id) - } - nMigrations, err := migrate.Exec(db, "sqlite3", migs, migrate.Up) - if err != nil { - return fmt.Errorf("error executing migration %w", err) - } - - logger.Infof("successfully ran %d migrations", nMigrations) - return nil -} diff --git a/db/sqlite.go b/db/sqlite.go deleted file mode 100644 index 23d5d2a19..000000000 --- a/db/sqlite.go +++ /dev/null @@ -1,29 +0,0 @@ -package db - -import ( - "database/sql" - "errors" - "fmt" - - _ "github.com/mattn/go-sqlite3" -) - -const ( - UniqueConstrain = 1555 -) - -var ( - ErrNotFound = errors.New("not found") -) - -// NewSQLiteDB creates a new SQLite DB -func NewSQLiteDB(dbPath string) (*sql.DB, error) { - return sql.Open("sqlite3", fmt.Sprintf("file:%s?_txlock=exclusive&_foreign_keys=on&_journal_mode=WAL", dbPath)) -} - -func ReturnErrNotFound(err error) error { - if errors.Is(err, sql.ErrNoRows) { - return ErrNotFound - } - return err -} diff --git a/db/tx.go b/db/tx.go deleted file mode 100644 index 926da07c9..000000000 --- a/db/tx.go +++ /dev/null @@ -1,60 +0,0 @@ -package db - -import ( - "context" -) - -type SQLTxer interface { - Querier - Commit() error - Rollback() error -} - 
-type Txer interface { - SQLTxer - AddRollbackCallback(cb func()) - AddCommitCallback(cb func()) -} - -type Tx struct { - SQLTxer - rollbackCallbacks []func() - commitCallbacks []func() -} - -func NewTx(ctx context.Context, db DBer) (Txer, error) { - tx, err := db.BeginTx(ctx, nil) - if err != nil { - return nil, err - } - return &Tx{ - SQLTxer: tx, - }, nil -} - -func (s *Tx) AddRollbackCallback(cb func()) { - s.rollbackCallbacks = append(s.rollbackCallbacks, cb) -} -func (s *Tx) AddCommitCallback(cb func()) { - s.commitCallbacks = append(s.commitCallbacks, cb) -} - -func (s *Tx) Commit() error { - if err := s.SQLTxer.Commit(); err != nil { - return err - } - for _, cb := range s.commitCallbacks { - cb() - } - return nil -} - -func (s *Tx) Rollback() error { - if err := s.SQLTxer.Rollback(); err != nil { - return err - } - for _, cb := range s.rollbackCallbacks { - cb() - } - return nil -} diff --git a/db/types/types.go b/db/types/types.go deleted file mode 100644 index ade190925..000000000 --- a/db/types/types.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -type Migration struct { - ID string - SQL string - Prefix string -} diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index d9d8cddfb..2cac9ca66 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,6 @@ # Summary -- [Getting Started](./getting_started.md) - - [Local Debug](./local_debug.md) +Welcome to the official documentation for the Polygon CDK (Chain Development Kit). This guide will help you get started with building and deploying rollups using the Polygon CDK. 
+ +- [Local Debug](./local_debug.md) - [DA Integration](./da_integration.md) -- [Non-EVM integrations](./non_evm_integration.md) -- [AggOracle](./aggoracle.md) diff --git a/docs/aggoracle.md b/docs/aggoracle.md deleted file mode 100644 index b67675753..000000000 --- a/docs/aggoracle.md +++ /dev/null @@ -1,123 +0,0 @@ -# AggOracle Component - Developer Documentation - -## Overview - -The **AggOracle** component ensures the **Global Exit Root (GER)** is propagated from L1 to the L2 sovereign chain smart contract. This is critical for enabling asset and message bridging between chains. - -The GER is picked up from the smart contract by **LastGERSyncer** for local storage. - -### Key Components: - -- **ChainSender**: Interface for submitting GERs to the smart contract. -- **EVMChainGERSender**: An implementation of `ChainSender`. - ---- - -## Workflow - -### What is Global Exit Root (GER)? - -The **Global Exit Root** consolidates: - -- **Mainnet Exit Root (MER)**: Updated during bridge transactions from L1. -- **Rollup Exit Root (RER)**: Updated when verified rollup batches are submitted via ZKP. - - GER = hash(MER, RER) - -### Process - -1. **Fetch Finalized GER**: - - AggOracle retrieves the latest GER finalized on L1. -2. **Check GER Injection**: - - Confirms whether the GER is already stored in the smart contract. -3. **Inject GER**: - - If missing, AggOracle submits the GER via the `insertGlobalExitRoot` function. -4. **Sync Locally**: - - LastGERSyncer fetches and stores the GER locally for downstream use. - -The sequence diagram below depicts the interaction in the AggOracle. 
- -```mermaid -sequenceDiagram - participant AggOracle - participant ChainSender - participant L1InfoTreer - participant L1Client - - AggOracle->>AggOracle: start - loop trigger on preconfigured frequency - AggOracle->>AggOracle: process latest GER - AggOracle->>L1InfoTreer: get last finalized GER - alt targetBlockNum == 0 - AggOracle->>L1Client: get (latest) header by number - L1Client-->>AggOracle: the latest header - AggOracle->>L1InfoTreer: get latest L1 info tree until provided header - L1InfoTreer-->>AggOracle: global exit root (from L1 info tree) - else - AggOracle->>L1InfoTreer: get latest L1 info tree until provided header - L1InfoTreer-->>AggOracle: global exit root (from L1 info tree) - end - AggOracle->>ChainSender: check is GER injected - ChainSender-->>AggOracle: isGERInjected result - alt is GER injected - AggOracle->>AggOracle: log GER already injected - else - AggOracle->>ChainSender: inject GER - ChainSender-->>AggOracle: GER injection result - end - end - AggOracle->>AggOracle: handle GER processing error -``` - ---- - -## Key Components - -### 1. AggOracle - -The `AggOracle` fetches the finalized GER and ensures its injection into the L2 smart contract. - -### Functions: - -- **`Start`**: Periodically processes GER updates using a ticker. -- **`processLatestGER`**: Checks if the GER exists and injects it if necessary. -- **`getLastFinalizedGER`**: Retrieves the latest finalized GER based on block finality. - ---- - -### 2. ChainSender Interface - -Defines the interface for submitting GERs. - -``` -IsGERInjected(ger common.Hash) (bool, error) -InjectGER(ctx context.Context, ger common.Hash) error -``` - ---- - -### 3. EVMChainGERSender - -Implements `ChainSender` using Ethereum clients and transaction management. - -### Functions: - -- **`IsGERInjected`**: Verifies GER presence in the smart contract. -- **`InjectGER`**: Submits the GER using the `insertGlobalExitRoot` method and monitors transaction status. 
- ---- - -## Smart Contract Integration - -- **Contract**: `GlobalExitRootManagerL2SovereignChain.sol` -- **Function**: `insertGlobalExitRoot` - - [Source Code](https://github.com/0xPolygonHermez/zkevm-contracts/blob/feature/audit-remediations/contracts/v2/sovereignChains/GlobalExitRootManagerL2SovereignChain.sol#L89-L103) -- **Bindings**: Available in [cdk-contracts-tooling](https://github.com/0xPolygon/cdk-contracts-tooling/tree/main/contracts/l2-sovereign-chain). - ---- - -## Summary - -The **AggOracle** component automates the propagation of GERs from L1 to L2, enabling bridging across networks. - -Refer to the EVM implementation in [evm.go](https://github.com/0xPolygon/cdk/blob/main/aggoracle/chaingersender/evm.go) for guidance on building new chain senders. diff --git a/docs/da_integration.md b/docs/da_integration.md index a752d01f7..61047922d 100644 --- a/docs/da_integration.md +++ b/docs/da_integration.md @@ -14,7 +14,7 @@ The versions of the smart contracts that are being targeted for the DA integrati - zkEVM to implement a rollup. - Validium to implement a validium. -- Adding a custom solution. +- Adding a custom solution. This document only considers the first approach, reusing the `PolygonValidium` consensus. That being said, the `PolygonValidium` implementation allows a custom smart contract to be used in the relevant interaction. This could be used by DAs to add custom on-chain verification logic. While verifying the DA integrity is optional, any new protocol will need to develop a custom smart contract in order to be successfully integrated (more details bellow) @@ -48,7 +48,7 @@ It's expected that any protocol build their own contract that follows [this inte In order to integrate a DA solution into CDK, the most fundamental part is for the node to be able to post and retrieve data from the DA backend. -Up until now, DAs would fork the `cdk-validium-node` repo to make such an integration. 
But maintaining forks can be really painful, so the team is proposing this solution that will allow the different DAs to be 1st class citizens and live on the official `cdk` repo. +Up until now, DAs would fork the `cdk-validium-node` repo to make such an integration. But maintaining forks can be really painful, so the team is proposing this solution that will allow the different DAs to be 1st class citizens and live on the official `cdk` repo. These items would need to be implemented to have a successful integration: @@ -64,7 +64,7 @@ These items would need to be implemented to have a successful integration: 1. Create an E2E test that uses your protocol by following the [test/e2e/datacommittee_test.go](https://github.com/0xPolygon/cdk-validium-node/blob/develop/test/e2e/datacommittee_test.go) example. 2. Follow the instructions on [Local Debug](local_debug.md) to run Kurtosis enviroment for local testing -4. Deploy the new contract contract to L1 running in Kurtosis +3. Deploy the new contract to L1 running in Kurtosis 4. Call `setDataAvailabilityProtocol` in validium consensus contract to use the newly deployed contract. 5. Modify the `Makefile` to be able to run your test, take the case of the DAC test as an example here diff --git a/docs/getting_started.md b/docs/getting_started.md deleted file mode 100644 index 702dabaf0..000000000 --- a/docs/getting_started.md +++ /dev/null @@ -1,17 +0,0 @@ -# Welcome to Polygon CDK Tech Docs - -Welcome to the official documentation for the Polygon CDK (Chain Development Kit). This guide will help you get started with building and deploying rollups using the Polygon CDK. - -## Getting Started - -To get started with Polygon CDK, follow these steps: -1.
[Getting Started](getting_started.md) - -## Documentation - -Explore the comprehensive documentation to understand the various features and capabilities of the Polygon CDK: -- [3rd party data availability integration](da_integration.md) - -## Support - -Happy coding with Polygon CDK! diff --git a/docs/local_debug.md b/docs/local_debug.md index dc6e998bb..4406101ad 100644 --- a/docs/local_debug.md +++ b/docs/local_debug.md @@ -10,7 +10,7 @@ ## Create configuration for this kurtosis environment -``` +```bash scripts/local_config ``` diff --git a/docs/non_evm_integration.md b/docs/non_evm_integration.md deleted file mode 100644 index ed304ea58..000000000 --- a/docs/non_evm_integration.md +++ /dev/null @@ -1,69 +0,0 @@ -# Integrating non-EVM systems - -This guide explains how to connect a third-party execution environment to the AggLayer using the CDK. - -## Important note - -The following information is experimental, and there aren't any working examples of non-EVM integrations with the AggLayer yet. While we know what needs to be done conceptually, the implementation details are likely to evolve. Think of this as a rough overview of the effort involved, rather than a step-by-step guide towards a production deployment. - -## Key Concepts - -Any system (chain or not chain) should be able to interact with the [unified LxLy bridge](https://docs.polygon.technology/zkEVM/architecture/unified-LxLy) and settle using the [AggLayer](https://docs.polygon.technology/learn/agglayer/overview/); especially when using the [Pessimistic Proof](https://docs.polygon.technology/learn/agglayer/pessimistic_proof/) option. Support for additional proofs, such as consensus, execution, or data availability are planned for the future. But, for now, this guide is based solely on using the Pessimistic Proof for settlement. - -The CDK client handles the integration with both the unified LxLy bridge and AggLayer. Think of it as an SDK to bring your project into the AggLayer ecosystem. 
You'll need to write some custom code in an adapter/plugin style so that the CDK client can connect with your service. - -In some cases, you might need to write code in `Go`. When that happens, the code should be in a separate repo and imported into the CDK as a dependency. The goal is to provide implementations that can interact with the *smart contracts* of the system being integrated, allowing the CDK client to reuse the same logic across different systems. Basically, you’ll need to create some *adapters* for the new system, while the existing code handles the rest. - -## Components for integration - -### Smart contracts - -For EVM-based integrations, there are two relevant smart contracts: - -- [Global exit root](https://github.com/0xPolygonHermez/zkevm-contracts/blob/feature/sovereign-bridge/contracts/v2/sovereignChains/GlobalExitRootManagerL2SovereignChain.sol) -- [Bridge](https://github.com/0xPolygonHermez/zkevm-contracts/blob/feature/sovereign-bridge/contracts/v2/sovereignChains/BridgeL2SovereignChain.sol) - -The integrated system needs to implement similar functionality. It doesn't have to be a smart contract per se, and it doesn't need to be split into two parts, but it should perform the functions that we list here: - -- Bridge assets and messages to other networks. -- Handle incoming asset/message claims. -- Export local exit roots (a hash needed for other networks to claim assets). -- Import global exit roots (a hash needed for processing bridge claims). - -### AggOracle - -This component imports global exit roots into the smart contract(s). It should be implemented as a `Go` package, using the [EVM example](../aggoracle/chaingersender/evm.go) as a reference. It should implement the `ChainSender` interface defined [here](../aggoracle/oracle.go). - -### BridgeSync - -BridgeSync synchronizes information about bridges and claims originating from the L2 service attached to the CDK client. 
In other words, it monitors what's happening with the bridge smart contract, collects the necessary data for interacting with the AggLayer, and feeds the bridge service to enable claims on destination networks. - -> **Heads up:** These interfaces may change. - -To process events from non-EVM systems, you'll need a `downloader` and `driver`. The current setup needs some tweaks to support custom implementations. In short, you need to work with the [`Processor`](../bridgesync/processor.go), particularly the `ProcessorInterface` found [here](../sync/driver.go). The `Events` in `Block` are just interfaces, which should be parsed as `Event` structs defined in the [`Processor`](../bridgesync/processor.go). - -### Claim sponsor - -This component performs claims on behalf of users, which is crucial for systems with "gas" fees (transaction costs). Without it, gas-based systems could face a chicken/egg situation: How can users pay for a claim if they need a previous claim to get the funds to pay for it? - -The claim sponsor is optional and may not be needed in some setups. The [bridge RPC](../rpc/bridge.go) includes a config parameter to enable or disable it. To implement a claim sponsor that can perform claim transactions on the bridge smart contract, you'll need to implement the `ClaimSender` interface, defined [here](../claimsponsor/claimsponsor.go). - -### Last GER sync - -> **Warning:** These interfaces may also change. - -This component tracks which global exit roots have been imported. It helps the bridge service know when incoming bridges are ready to be claimed. The work needed is similar to that for the bridge sync: Implement the [`ProcessorInterface`](../sync/driver.go), with events of type `Event` defined [here](../lastgersync/processor.go). - -## Additional considerations - -### Bridge - -Once all components are implemented, the network should be connected to the unified LxLy bridge. 
However, keep in mind: - -- Outgoing bridges should work with current tools and UIs, but incoming bridges may not. When using the claim sponsor, things should just work. However, the claim sponsor is optional... The point being that the existing UIs are built to send EVM transactions to make the claim in the absence of claim sponsor. So any claim interaction beyond the auto-claim functionality will need UIs and tooling that are out of the sope of the CDK. -- Bridging assets/messages to another network is specific to the integrated system. You'll need to create mechanisms to interact with the *bridge smart contract* of your service for these actions. -- We’re moving towards an *in-CDK* bridge service (spec [here](https://hackmd.io/0vA-XU2BRHmH3Ab0j4ouZw)), replacing the current separate service ([here](https://github.com/0xPolygonHermez/zkevm-bridge-service)). There's no stable API yet, and SDKs/UIs are still in development. - -### AggLayer - -AggLayer integration will work once the components are ready, but initially, it will only support Pessimistic Proof. Later updates will add more security features like execution proofs, consensus proofs, data availability, and forced transactions. These will be optional, while Pessimistic Proof will remain mandatory. 
\ No newline at end of file diff --git a/etherman/aggregator.go b/etherman/aggregator.go index 4197c7a27..1d18cdcbd 100644 --- a/etherman/aggregator.go +++ b/etherman/aggregator.go @@ -9,7 +9,7 @@ import ( "strings" ethmanTypes "github.com/0xPolygon/cdk/aggregator/ethmantypes" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" diff --git a/etherman/contracts/base.go b/etherman/contracts/base.go index acc19e76b..33dac74c3 100644 --- a/etherman/contracts/base.go +++ b/etherman/contracts/base.go @@ -3,7 +3,7 @@ package contracts import ( "reflect" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) diff --git a/etherman/etherman.go b/etherman/etherman.go index fa0033033..edee4ef05 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -14,7 +14,7 @@ import ( cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/etherman/config" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" diff --git a/go.mod b/go.mod index a70c8e608..b2c059cf0 100644 --- a/go.mod +++ b/go.mod @@ -3,39 +3,31 @@ module github.com/0xPolygon/cdk go 1.22.4 require ( - github.com/0xPolygon/cdk-contracts-tooling v0.0.1 - github.com/0xPolygon/cdk-data-availability v0.0.11 + github.com/0xPolygon/cdk-contracts-tooling v0.0.2-0.20241225094934-1d381f5703ef + github.com/0xPolygon/cdk-data-availability v0.0.12-0.20250110120923-25a978231f89 github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 github.com/0xPolygon/zkevm-ethtx-manager v0.2.4 github.com/0xPolygonHermez/zkevm-synchronizer-l1 v1.0.6 + 
github.com/agglayer/aggkit v0.0.2-0.20250210155301-c8daf4c3283e github.com/ethereum/go-ethereum v1.14.10 - github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 - github.com/hermeznetwork/tracerr v0.3.2 github.com/iden3/go-iden3-crypto v0.0.17 - github.com/invopop/jsonschema v0.12.0 + github.com/invopop/jsonschema v0.13.0 github.com/jackc/pgx/v4 v4.18.3 github.com/knadh/koanf/parsers/json v0.1.0 github.com/knadh/koanf/parsers/toml v0.1.0 github.com/knadh/koanf/providers/rawbytes v0.1.0 - github.com/knadh/koanf/v2 v2.1.1 - github.com/mattn/go-sqlite3 v1.14.24 + github.com/knadh/koanf/v2 v2.1.2 github.com/mitchellh/mapstructure v1.5.0 - github.com/pelletier/go-toml/v2 v2.2.2 + github.com/pelletier/go-toml/v2 v2.2.3 github.com/rubenv/sql-migrate v1.7.1 - github.com/russross/meddler v1.0.1 github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.10.0 - github.com/urfave/cli/v2 v2.27.4 + github.com/urfave/cli/v2 v2.27.5 github.com/valyala/fasttemplate v1.2.2 - go.opentelemetry.io/otel v1.24.0 - go.opentelemetry.io/otel/metric v1.24.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.31.0 - golang.org/x/net v0.33.0 - golang.org/x/sync v0.10.0 + golang.org/x/net v0.34.0 google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.34.2 - modernc.org/sqlite v1.32.0 + google.golang.org/protobuf v1.36.5 ) require ( @@ -57,7 +49,7 @@ require ( github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.13.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/crate-crypto/go-kzg-4844 v1.1.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -71,20 +63,19 @@ require ( github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect 
github.com/getsentry/sentry-go v0.28.1 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect - github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect + github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 // indirect github.com/golang-jwt/jwt/v4 v4.5.1 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect - github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 // indirect + github.com/hermeznetwork/tracerr v0.3.2 // indirect github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.3.2 // indirect @@ -109,6 +100,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-sqlite3 v1.14.24 // indirect github.com/miguelmota/go-solidity-sha3 v0.1.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect @@ -117,7 +109,6 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncruces/go-strftime v0.1.9 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/onsi/gomega v1.27.10 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -127,9 +118,10 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.2.0 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/russross/meddler v1.0.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect @@ -148,21 +140,20 @@ require ( github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect - go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/multierr v1.10.0 // indirect + golang.org/x/crypto v0.33.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a // indirect modernc.org/libc v1.60.0 // indirect modernc.org/mathutil v1.6.0 // indirect modernc.org/memory v1.8.0 // indirect - modernc.org/strutil v1.2.0 // indirect - modernc.org/token v1.1.0 // indirect + modernc.org/sqlite v1.34.5 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index e496f9b30..dada0663a 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ 
-github.com/0xPolygon/cdk-contracts-tooling v0.0.1 h1:2HH8KpO1CZRl1zHfn0IYwJhPA7l91DOWrjdExmaB9Kk= -github.com/0xPolygon/cdk-contracts-tooling v0.0.1/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= -github.com/0xPolygon/cdk-data-availability v0.0.11 h1:enmlyFYCvmDmcX/2fnDjWnn3svqqm9o2Fe+Kupoykdo= -github.com/0xPolygon/cdk-data-availability v0.0.11/go.mod h1:20WaXcSp7ggoxWePL9ReKSuqksHUx5h8LNQ+b56OHJE= +github.com/0xPolygon/cdk-contracts-tooling v0.0.2-0.20241225094934-1d381f5703ef h1:DRBrbysjMTyeFRbyo+zoltOTET+vR20CnXc4wupj+qo= +github.com/0xPolygon/cdk-contracts-tooling v0.0.2-0.20241225094934-1d381f5703ef/go.mod h1:mFlcEjsm2YBBsu8atHJ3zyVnwM+Z/fMXpVmIJge+WVU= +github.com/0xPolygon/cdk-data-availability v0.0.12-0.20250110120923-25a978231f89 h1:dqMjGG25vl7yq/G7RN+xvePsxeoMRtC/FU+IJrwcFAs= +github.com/0xPolygon/cdk-data-availability v0.0.12-0.20250110120923-25a978231f89/go.mod h1:Uv6+NnNQ5X6Drdb8YIhfA0kuQvFxVhJsnFZXUyq6050= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6 h1:FXL/rcO7/GtZ3kRFw+C7J6vmGnl8gcazg+Gh/NVmnas= github.com/0xPolygon/cdk-rpc v0.0.0-20241004114257-6c3cb6eebfb6/go.mod h1:2scWqMMufrQXu7TikDgQ3BsyaKoX8qP26D6E262vSOg= github.com/0xPolygon/zkevm-ethtx-manager v0.2.4 h1:OrtSD8jLVeQnN+1I0c7U/3+EYSd+h3wm1vygrDAXBZg= @@ -20,6 +20,8 @@ github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDO github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= +github.com/agglayer/aggkit v0.0.2-0.20250210155301-c8daf4c3283e h1:G9WRfV0R94jogpSm3ydKWgWNT+h6wbBhIo7GZLIG6QY= +github.com/agglayer/aggkit v0.0.2-0.20250210155301-c8daf4c3283e/go.mod h1:vT3LjN3DuNqpTs2T9jJAjnorkKjCK15R4y/oI49+hEY= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod 
h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -65,8 +67,8 @@ github.com/consensys/gnark-crypto v0.13.0 h1:VPULb/v6bbYELAPTDFINEVaMTTybV5GLxDd github.com/consensys/gnark-crypto v0.13.0/go.mod h1:wKqwsieaKPThcFkHe0d0zMsbHEUWFmZcG7KBCse210o= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.5 h1:ZtcqGrnekaHpVLArFSe4HK5DoKx1T0rq2DwVB0alcyc= +github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a h1:W8mUrRp6NOVl3J+MYp5kPMoUZPp7aOYHtaua31lwRHg= github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a/go.mod h1:sTwzHBvIzm2RfVCGNEBZgRyjwK40bVoun3ZnGOCafNM= github.com/crate-crypto/go-kzg-4844 v1.1.0 h1:EN/u9k2TF6OWSHrCCDBBU6GLNMq88OspHHlMnHfoyU4= @@ -110,11 +112,6 @@ github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr 
v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -125,8 +122,8 @@ github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfC github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 h1:TQcrn6Wq+sKGkpyPvppOz99zsMBaUOKXq6HSv655U1c= -github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= +github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= @@ -168,8 +165,6 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod 
h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157 h1:uyodBE3xDz0ynKs1tLBU26wOQoEkAqqiY18DbZ+FZrA= github.com/hashicorp/hcl v1.0.1-0.20180906183839-65a6292f0157/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hermeznetwork/tracerr v0.3.2 h1:QB3TlQxO/4XHyixsg+nRZPuoel/FFQlQ7oAoHDD5l1c= @@ -186,8 +181,8 @@ github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFck github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/iden3/go-iden3-crypto v0.0.17 h1:NdkceRLJo/pI4UpcjVah4lN/a3yzxRUGXqxbWcYh9mY= github.com/iden3/go-iden3-crypto v0.0.17/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= -github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= -github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -254,8 +249,8 @@ github.com/knadh/koanf/parsers/toml v0.1.0 h1:S2hLqS4TgWZYj4/7mI5m1CQQcWurxUz6OD github.com/knadh/koanf/parsers/toml v0.1.0/go.mod h1:yUprhq6eo3GbyVXFFMdbfZSo928ksS+uo0FFqNMnO18= github.com/knadh/koanf/providers/rawbytes v0.1.0 h1:dpzgu2KO6uf6oCb4aP05KDmKmAmI51k5pe8RYKQ0qME= github.com/knadh/koanf/providers/rawbytes v0.1.0/go.mod h1:mMTB1/IcJ/yE++A2iEZbY1MLygX7vttU+C+S/YmPu9c= -github.com/knadh/koanf/v2 v2.1.1 h1:/R8eXqasSTsmDCsAyYj+81Wteg8AqrV9CP6gvsTsOmM= -github.com/knadh/koanf/v2 v2.1.1/go.mod h1:4mnTRbZCK+ALuBXHZMjDfG9y714L7TykVnZkXbMU3Es= 
+github.com/knadh/koanf/v2 v2.1.2 h1:I2rtLRqXRy1p01m/utEtpZSSA6dcJbgGVuE27kW2PzQ= +github.com/knadh/koanf/v2 v2.1.2/go.mod h1:Gphfaen0q1Fc1HTgJgSTC4oRX9R2R5ErYMZJy8fLJBo= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -338,8 +333,8 @@ github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= +github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -365,8 +360,8 @@ github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod 
h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= @@ -406,8 +401,6 @@ github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -415,11 +408,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -434,8 +423,8 @@ github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+F github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= -github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= -github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= +github.com/urfave/cli/v2 v2.27.5 h1:WoHEJLdsXr6dDWoJgMq/CboDmyY/8HMMH1fTECbih+w= +github.com/urfave/cli/v2 v2.27.5/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= @@ -447,12 +436,6 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBi github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= -go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= -go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= -go.opentelemetry.io/otel/metric 
v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= -go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -479,8 +462,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= +golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -502,15 +485,15 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net 
v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -542,8 +525,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 
h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -553,8 +536,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= @@ -591,8 +574,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.36.5 
h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -623,8 +606,6 @@ modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= modernc.org/gc/v2 v2.5.0 h1:bJ9ChznK1L1mUtAQtxi0wi5AtAs5jQuw4PrPHO5pb6M= modernc.org/gc/v2 v2.5.0/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= -modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a h1:CfbpOLEo2IwNzJdMvE8aiRbPMxoTpgAJeyePh0SmO8M= -modernc.org/gc/v3 v3.0.0-20240801135723-a856999a2e4a/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= modernc.org/libc v1.60.0 h1:XeRF1gXky7JE5E8IErtYAdKj+ykZPdYUsgJNQ8RFWIA= modernc.org/libc v1.60.0/go.mod h1:xJuobKuNxKH3RUatS7GjR+suWj+5c2K7bi4m/S5arOY= modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= @@ -635,8 +616,8 @@ modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= -modernc.org/sqlite v1.32.0 h1:6BM4uGza7bWypsw4fdLRsLxut6bHe4c58VeqjRgST8s= -modernc.org/sqlite v1.32.0/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= +modernc.org/sqlite v1.34.5 h1:Bb6SR13/fjp15jt70CL4f18JIN7p7dnMExd+UFnF15g= +modernc.org/sqlite v1.34.5/go.mod h1:YLuNmX9NKs8wRNK2ko1LW1NGYcc9FkBO69JOt1AR9JE= modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod 
h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= diff --git a/hex/hex.go b/hex/hex.go deleted file mode 100644 index c7e1f860c..000000000 --- a/hex/hex.go +++ /dev/null @@ -1,124 +0,0 @@ -package hex - -import ( - "encoding/hex" - "fmt" - "math/big" - "strconv" - "strings" -) - -const ( - // Base represents the hexadecimal base, which is 16 - Base = 16 - - // BitSize64 64 bits - BitSize64 = 64 -) - -// DecError represents an error when decoding a hex value -type DecError struct{ msg string } - -func (err DecError) Error() string { return err.msg } - -// EncodeToHex generates a hex string based on the byte representation, with the '0x' prefix -func EncodeToHex(str []byte) string { - return "0x" + hex.EncodeToString(str) -} - -// EncodeToString is a wrapper method for hex.EncodeToString -func EncodeToString(str []byte) string { - return hex.EncodeToString(str) -} - -// DecodeString returns the byte representation of the hexadecimal string -func DecodeString(str string) ([]byte, error) { - return hex.DecodeString(str) -} - -// DecodeHex converts a hex string to a byte array -func DecodeHex(str string) ([]byte, error) { - str = strings.TrimPrefix(str, "0x") - - // Check if the string has an odd length - if len(str)%2 != 0 { - // Prepend a '0' to make it even-length - str = "0" + str - } - - return hex.DecodeString(str) -} - -// MustDecodeHex type-checks and converts a hex string to a byte array -func MustDecodeHex(str string) []byte { - buf, err := DecodeHex(str) - if err != nil { - panic(fmt.Errorf("could not decode hex: %w", err)) - } - - return buf -} - -// DecodeUint64 type-checks and converts a hex string to a uint64 -func DecodeUint64(str string) uint64 { - i := DecodeBig(str) - - return i.Uint64() -} - -// EncodeUint64 encodes a number as a hex string with 0x prefix. 
-func EncodeUint64(i uint64) string { - enc := make([]byte, 2, 10) //nolint:mnd - copy(enc, "0x") - - return string(strconv.AppendUint(enc, i, Base)) -} - -// BadNibble is a nibble that is bad -const BadNibble = ^uint64(0) - -// DecodeNibble decodes a byte into a uint64 -func DecodeNibble(in byte) uint64 { - switch { - case in >= '0' && in <= '9': - return uint64(in - '0') - case in >= 'A' && in <= 'F': - return uint64(in - 'A' + 10) //nolint:mnd - case in >= 'a' && in <= 'f': - return uint64(in - 'a' + 10) //nolint:mnd - default: - return BadNibble - } -} - -// EncodeBig encodes bigint as a hex string with 0x prefix. -// The sign of the integer is ignored. -func EncodeBig(bigint *big.Int) string { - numBits := bigint.BitLen() - if numBits == 0 { - return "0x0" - } - - return fmt.Sprintf("%#x", bigint) -} - -// DecodeBig converts a hex number to a big.Int value -func DecodeBig(hexNum string) *big.Int { - str := strings.TrimPrefix(hexNum, "0x") - createdNum := new(big.Int) - createdNum.SetString(str, Base) - - return createdNum -} - -// IsValid checks if the provided string is a valid hexadecimal value -func IsValid(s string) bool { - str := strings.TrimPrefix(s, "0x") - for _, b := range []byte(str) { - if !(b >= '0' && b <= '9' || b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F') { - return false - } - } - - return true -} diff --git a/hex/hex_test.go b/hex/hex_test.go deleted file mode 100644 index da86da358..000000000 --- a/hex/hex_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package hex - -import ( - "encoding/hex" - "math" - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEncodeDecodeBig(t *testing.T) { - b := big.NewInt(math.MaxInt64) - e := EncodeBig(b) - d := DecodeBig(e) - assert.Equal(t, b.Uint64(), d.Uint64()) -} - -// Define a struct for test cases -type TestCase struct { - input string - output []byte - err error -} - -// Unit test function -func TestDecodeHex(t *testing.T) { - testCases := 
[]TestCase{ - {"0", []byte{0}, nil}, - {"00", []byte{0}, nil}, - {"0x0", []byte{0}, nil}, - {"0x00", []byte{0}, nil}, - {"1", []byte{1}, nil}, - {"01", []byte{1}, nil}, - {"", []byte{}, hex.ErrLength}, - {"0x", []byte{}, hex.ErrLength}, - {"zz", []byte{}, hex.InvalidByteError('z')}, - } - - for _, tc := range testCases { - t.Run(tc.input, func(t *testing.T) { - output, err := DecodeHex(tc.input) - if tc.err != nil { - require.Error(t, tc.err, err) - } else { - require.NoError(t, err) - } - require.Equal(t, output, tc.output) - }) - } -} diff --git a/l1infotree/hash.go b/l1infotree/hash.go deleted file mode 100644 index 5a33f5a32..000000000 --- a/l1infotree/hash.go +++ /dev/null @@ -1,45 +0,0 @@ -package l1infotree - -import ( - "encoding/binary" - - "github.com/ethereum/go-ethereum/common" - "github.com/iden3/go-iden3-crypto/keccak256" - "golang.org/x/crypto/sha3" -) - -// Hash calculates the keccak hash of elements. -func Hash(data ...[32]byte) [32]byte { - var res [32]byte - hash := sha3.NewLegacyKeccak256() - for _, d := range data { - hash.Write(d[:]) - } - copy(res[:], hash.Sum(nil)) - - return res -} - -func generateZeroHashes(height uint8) [][32]byte { - var zeroHashes = [][32]byte{ - common.Hash{}, - } - // This generates a leaf = HashZero in position 0. In the rest of the positions that - // are equivalent to the ascending levels, we set the hashes of the nodes. - // So all nodes from level i=5 will have the same value and same children nodes. - for i := 1; i <= int(height); i++ { - zeroHashes = append(zeroHashes, Hash(zeroHashes[i-1], zeroHashes[i-1])) - } - - return zeroHashes -} - -// HashLeafData calculates the keccak hash of the leaf values. 
-func HashLeafData(ger, prevBlockHash common.Hash, minTimestamp uint64) [32]byte { - var res [32]byte - t := make([]byte, 8) //nolint:mnd - binary.BigEndian.PutUint64(t, minTimestamp) - copy(res[:], keccak256.Hash(ger.Bytes(), prevBlockHash.Bytes(), t)) - - return res -} diff --git a/l1infotree/hash_test.go b/l1infotree/hash_test.go deleted file mode 100644 index a792c0b82..000000000 --- a/l1infotree/hash_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package l1infotree - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" -) - -func TestHashLeaf(t *testing.T) { - expectedLeafHash := common.HexToHash("0xf62f487534b899b1c362242616725878188ca891fab60854b792ca0628286de7") - - prevBlockHash := common.HexToHash("0x24a5871d68723340d9eadc674aa8ad75f3e33b61d5a9db7db92af856a19270bb") - var minTimestamp uint64 = 1697231573 - ger := common.HexToHash("0x16994edfddddb9480667b64174fc00d3b6da7290d37b8db3a16571b4ddf0789f") - - leaf := HashLeafData(ger, prevBlockHash, minTimestamp) - - assert.Equal(t, expectedLeafHash, common.BytesToHash(leaf[:])) -} diff --git a/l1infotree/tree.go b/l1infotree/tree.go deleted file mode 100644 index f3ad6d36e..000000000 --- a/l1infotree/tree.go +++ /dev/null @@ -1,206 +0,0 @@ -package l1infotree - -import ( - "fmt" - - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum/common" -) - -// L1InfoTree provides methods to compute L1InfoTree -type L1InfoTree struct { - logger *log.Logger - height uint8 - zeroHashes [][32]byte - count uint32 - siblings [][32]byte - currentRoot common.Hash -} - -// NewL1InfoTree creates new L1InfoTree. 
-func NewL1InfoTree(logger *log.Logger, height uint8, initialLeaves [][32]byte) (*L1InfoTree, error) { - mt := &L1InfoTree{ - logger: logger, - zeroHashes: generateZeroHashes(height), - height: height, - count: uint32(len(initialLeaves)), - } - var err error - mt.siblings, mt.currentRoot, err = mt.initSiblings(initialLeaves) - if err != nil { - mt.logger.Error("error initializing siblings. Error: ", err) - - return nil, err - } - mt.logger.Debug("Initial count: ", mt.count) - mt.logger.Debug("Initial root: ", mt.currentRoot) - return mt, nil -} - -// ResetL1InfoTree resets the L1InfoTree. -func (mt *L1InfoTree) ResetL1InfoTree(initialLeaves [][32]byte) (*L1InfoTree, error) { - const defaultTreeHeight = 32 - mt.logger.Info("Resetting L1InfoTree...") - newMT := &L1InfoTree{ - zeroHashes: generateZeroHashes(defaultTreeHeight), - height: defaultTreeHeight, - count: uint32(len(initialLeaves)), - } - var err error - newMT.siblings, newMT.currentRoot, err = newMT.initSiblings(initialLeaves) - if err != nil { - mt.logger.Error("error initializing siblings. 
Error: ", err) - - return nil, err - } - mt.logger.Debug("Reset initial count: ", newMT.count) - mt.logger.Debug("Reset initial root: ", newMT.currentRoot) - return newMT, nil -} - -func buildIntermediate(leaves [][32]byte) ([][][]byte, [][32]byte) { - var ( - nodes [][][]byte - hashes [][32]byte - ) - for i := 0; i < len(leaves); i += 2 { - var left, right = i, i + 1 - hash := Hash(leaves[left], leaves[right]) - nodes = append(nodes, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) - hashes = append(hashes, hash) - } - - return nodes, hashes -} - -// BuildL1InfoRoot computes the root given the leaves of the tree -func (mt *L1InfoTree) BuildL1InfoRoot(leaves [][32]byte) (common.Hash, error) { - var ( - nodes [][][][]byte - ns [][][]byte - ) - if len(leaves) == 0 { - leaves = append(leaves, mt.zeroHashes[0]) - } - - for h := uint8(0); h < mt.height; h++ { - if len(leaves)%2 == 1 { - leaves = append(leaves, mt.zeroHashes[h]) - } - ns, leaves = buildIntermediate(leaves) - nodes = append(nodes, ns) - } - if len(ns) != 1 { - return common.Hash{}, fmt.Errorf("error: more than one root detected: %+v", nodes) - } - - return common.BytesToHash(ns[0][0]), nil -} - -// ComputeMerkleProof computes the merkleProof and root given the leaves of the tree -func (mt *L1InfoTree) ComputeMerkleProof(gerIndex uint32, leaves [][32]byte) ([][32]byte, common.Hash, error) { - var ns [][][]byte - if len(leaves) == 0 { - leaves = append(leaves, mt.zeroHashes[0]) - } - var siblings [][32]byte - index := gerIndex - for h := uint8(0); h < mt.height; h++ { - if len(leaves)%2 == 1 { - leaves = append(leaves, mt.zeroHashes[h]) - } - if index >= uint32(len(leaves)) { - siblings = append(siblings, mt.zeroHashes[h]) - } else { - if index%2 == 1 { // If it is odd - siblings = append(siblings, leaves[index-1]) - } else { // It is even - siblings = append(siblings, leaves[index+1]) - } - } - var ( - nsi [][][]byte - hashes [][32]byte - ) - for i := 0; i < len(leaves); i += 2 { - var left, right = 
i, i + 1 - hash := Hash(leaves[left], leaves[right]) - nsi = append(nsi, [][]byte{hash[:], leaves[left][:], leaves[right][:]}) - hashes = append(hashes, hash) - } - // Find the index of the leaf in the next level of the tree. - // Divide the index by 2 to find the position in the upper level - index = uint32(float64(index) / 2) //nolint:mnd - ns = nsi - leaves = hashes - } - if len(ns) != 1 { - return nil, common.Hash{}, fmt.Errorf("error: more than one root detected: %+v", ns) - } - - return siblings, common.BytesToHash(ns[0][0]), nil -} - -// AddLeaf adds new leaves to the tree and computes the new root -func (mt *L1InfoTree) AddLeaf(index uint32, leaf [32]byte) (common.Hash, error) { - if index != mt.count { - return common.Hash{}, fmt.Errorf("mismatched leaf count: %d, expected: %d", index, mt.count) - } - cur := leaf - isFilledSubTree := true - - for h := uint8(0); h < mt.height; h++ { - if index&(1< 0 { - var child [32]byte - copy(child[:], cur[:]) - parent := Hash(mt.siblings[h], child) - cur = parent - } else { - if isFilledSubTree { - // we will update the sibling when the sub tree is complete - copy(mt.siblings[h][:], cur[:]) - // we have a left child in this layer, it means the right child is empty so the sub tree is not completed - isFilledSubTree = false - } - var child [32]byte - copy(child[:], cur[:]) - parent := Hash(child, mt.zeroHashes[h]) - cur = parent - // the sibling of 0 bit should be the zero hash, since we are in the last node of the tree - } - } - mt.currentRoot = cur - mt.count++ - - return cur, nil -} - -// initSiblings returns the siblings of the node at the given index. -// it is used to initialize the siblings array in the beginning. 
-func (mt *L1InfoTree) initSiblings(initialLeaves [][32]byte) ([][32]byte, common.Hash, error) { - if mt.count != uint32(len(initialLeaves)) { - return nil, [32]byte{}, fmt.Errorf("error: mt.count and initialLeaves length mismatch") - } - if mt.count == 0 { - var siblings [][32]byte - for h := 0; h < int(mt.height); h++ { - var left [32]byte - copy(left[:], mt.zeroHashes[h][:]) - siblings = append(siblings, left) - } - root, err := mt.BuildL1InfoRoot(initialLeaves) - if err != nil { - mt.logger.Error("error calculating initial root: ", err) - return nil, [32]byte{}, err - } - - return siblings, root, nil - } - - return mt.ComputeMerkleProof(mt.count, initialLeaves) -} - -// GetCurrentRootCountAndSiblings returns the latest root, count and sibblings -func (mt *L1InfoTree) GetCurrentRootCountAndSiblings() (common.Hash, uint32, [][32]byte) { - return mt.currentRoot, mt.count, mt.siblings -} diff --git a/l1infotree/tree_test.go b/l1infotree/tree_test.go deleted file mode 100644 index 6af4b8b30..000000000 --- a/l1infotree/tree_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package l1infotree_test - -import ( - "encoding/hex" - "encoding/json" - "os" - "testing" - - "github.com/0xPolygon/cdk/l1infotree" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/test/vectors" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestComputeTreeRoot(t *testing.T) { - data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json") - require.NoError(t, err) - var mtTestVectors []vectors.L1InfoTree - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - for _, testVector := range mtTestVectors { - input := testVector.PreviousLeafValues - mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), [][32]byte{}) - require.NoError(t, err) - - var leaves [][32]byte - for _, v := range input { - leaves = append(leaves, v) - } - - if len(leaves) != 0 { - root, err := 
mt.BuildL1InfoRoot(leaves) - require.NoError(t, err) - require.Equal(t, testVector.CurrentRoot, root) - } - - leaves = append(leaves, testVector.NewLeafValue) - newRoot, err := mt.BuildL1InfoRoot(leaves) - require.NoError(t, err) - require.Equal(t, testVector.NewRoot, newRoot) - } -} - -func TestComputeMerkleProof(t *testing.T) { - logger := log.GetDefaultLogger() - mt, err := l1infotree.NewL1InfoTree(logger, uint8(32), [][32]byte{}) - require.NoError(t, err) - leaves := [][32]byte{ - common.HexToHash("0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d"), - common.HexToHash("0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d"), - common.HexToHash("0x0349657c7850dc9b2b73010501b01cd6a38911b6a2ad2167c164c5b2a5b344de"), - common.HexToHash("0xb32f96fad8af99f3b3cb90dfbb4849f73435dbee1877e4ac2c213127379549ce"), - common.HexToHash("0x79ffa1294bf48e0dd41afcb23b2929921e4e17f2f81b7163c23078375b06ba4f"), - common.HexToHash("0x0004063b5c83f56a17f580db0908339c01206cdf8b59beb13ce6f146bb025fe2"), - common.HexToHash("0x68e4f2c517c7f60c3664ac6bbe78f904eacdbe84790aa0d15d79ddd6216c556e"), - common.HexToHash("0xf7245f4d84367a189b90873e4563a000702dbfe974b872fdb13323a828c8fb71"), - common.HexToHash("0x0e43332c71c6e2f4a48326258ea17b75d77d3063a4127047dd32a4cb089e62a4"), - common.HexToHash("0xd35a1dc90098c0869a69891094c119eb281cee1a7829d210df1bf8afbea08adc"), - common.HexToHash("0x13bffd0da370d1e80a470821f1bee9607f116881feb708f1ec255da1689164b3"), - common.HexToHash("0x5fa79a24c9bc73cd507b02e5917cef9782529080aa75eacb2bf4e1d45fda7f1d"), - common.HexToHash("0x975b5bbc67345adc6ee6d1d67d1d5cd2a430c231d93e5a8b5a6f00b0c0862215"), - common.HexToHash("0x0d0fa887c045a53ec6212dee58964d0ae89595b7d11745a05c397240a4dceb20"), - common.HexToHash("0xa6ae5bc494a2ee0e5173d0e0b546533973104e0031c69d0cd65cdc7bb4d64670"), - common.HexToHash("0x21ccc18196a8fd74e720c6c129977d80bb804d3331673d6411871df14f7e7ae4"), - 
common.HexToHash("0xf8b1b98ac75bea8dbed034d0b3cd08b4c9275644c2242781a827e53deb2386c3"), - common.HexToHash("0x26401c418ef8bc5a80380f25f16dfc78b7053a26c0ca425fda294b1678b779fc"), - common.HexToHash("0xc53fd99005361738fc811ce87d194deed34a7f06ebd5371b19a008e8d1e8799f"), - common.HexToHash("0x570bd643e35fbcda95393994812d9212335e6bd4504b3b1dc8f3c6f1eeb247b2"), - common.HexToHash("0xb21ac971d007810540583bd3c0d4f35e0c2f4b62753e51c104a5753c6372caf8"), - common.HexToHash("0xb8dae305b34c749cbbd98993bfd71ec2323e8364861f25b4c5e0ac3c9587e16d"), - common.HexToHash("0x57c7fabd0f70e0059e871953fcb3dd43c6b8a5f348dbe771190cc8b0320336a5"), - common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), - common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), - common.HexToHash("0x95b0d23c347e2a88fc8e2ab900b09212a1295ab8f169075aa27e8719557d9b06"), - } - require.Equal(t, 26, len(leaves)) - siblings, root, err := mt.ComputeMerkleProof(1, leaves) - require.NoError(t, err) - require.Equal(t, "0x4ed479841384358f765966486782abb598ece1d4f834a22474050d66a18ad296", root.String()) - expectedProof := []string{"0x83fc198de31e1b2b1a8212d2430fbb7766c13d9ad305637dea3759065606475d", "0x2815e0bbb1ec18b8b1bc64454a86d072e12ee5d43bb559b44059e01edff0af7a", "0x7fb6cc0f2120368a845cf435da7102ff6e369280f787bc51b8a989fc178f7252", "0x407db5edcdc0ddd4f7327f208f46db40c4c4dbcc46c94a757e1d1654acbd8b72", "0xce2cdd1ef2e87e82264532285998ff37024404ab3a2b77b50eb1ad856ae83e14", "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", 
"0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9"} - for i := 0; i < len(siblings); i++ { - require.Equal(t, expectedProof[i], "0x"+hex.EncodeToString(siblings[i][:])) - } -} - -func TestAddLeaf(t *testing.T) { - data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/proof-vectors.json") - require.NoError(t, err) - var mtTestVectors []vectors.L1InfoTreeProof - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - testVector := mtTestVectors[3] - var leaves [][32]byte - mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), leaves) - require.NoError(t, err) - for _, leaf := range 
testVector.Leaves { - _, count, _ := mt.GetCurrentRootCountAndSiblings() - _, err := mt.AddLeaf(count, leaf) - require.NoError(t, err) - } - log.Debugf("%d leaves added successfully", len(testVector.Leaves)) - root, _, _ := mt.GetCurrentRootCountAndSiblings() - require.Equal(t, testVector.Root, root) - log.Debug("Final root: ", root) -} - -func TestAddLeaf2(t *testing.T) { - data, err := os.ReadFile("../test/vectors/src/merkle-tree/l1-info-tree/root-vectors.json") - require.NoError(t, err) - var mtTestVectors []vectors.L1InfoTree - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - for _, testVector := range mtTestVectors { - input := testVector.PreviousLeafValues - - var leaves [][32]byte - for _, v := range input { - leaves = append(leaves, v) - } - mt, err := l1infotree.NewL1InfoTree(log.GetDefaultLogger(), uint8(32), leaves) - require.NoError(t, err) - - initialRoot, count, _ := mt.GetCurrentRootCountAndSiblings() - require.Equal(t, testVector.CurrentRoot, initialRoot) - - newRoot, err := mt.AddLeaf(count, testVector.NewLeafValue) - require.NoError(t, err) - require.Equal(t, testVector.NewRoot, newRoot) - } -} diff --git a/l1infotreesync/config.go b/l1infotreesync/config.go deleted file mode 100644 index 64318fae4..000000000 --- a/l1infotreesync/config.go +++ /dev/null @@ -1,20 +0,0 @@ -package l1infotreesync - -import ( - "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/common" -) - -type Config struct { - DBPath string `mapstructure:"DBPath"` - GlobalExitRootAddr common.Address `mapstructure:"GlobalExitRootAddr"` - RollupManagerAddr common.Address `mapstructure:"RollupManagerAddr"` - SyncBlockChunkSize uint64 `mapstructure:"SyncBlockChunkSize"` - // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - 
URLRPCL1 string `mapstructure:"URLRPCL1"` - WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` - InitialBlock uint64 `mapstructure:"InitialBlock"` - RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` -} diff --git a/l1infotreesync/downloader.go b/l1infotreesync/downloader.go deleted file mode 100644 index 48e237025..000000000 --- a/l1infotreesync/downloader.go +++ /dev/null @@ -1,191 +0,0 @@ -package l1infotreesync - -import ( - "fmt" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/etrog/polygonrollupmanager" - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmglobalexitrootv2" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" -) - -var ( - updateL1InfoTreeSignatureV1 = crypto.Keccak256Hash([]byte("UpdateL1InfoTree(bytes32,bytes32)")) - updateL1InfoTreeSignatureV2 = crypto.Keccak256Hash([]byte("UpdateL1InfoTreeV2(bytes32,uint32,uint256,uint64)")) - verifyBatchesSignature = crypto.Keccak256Hash( - []byte("VerifyBatches(uint32,uint64,bytes32,bytes32,address)"), - ) - verifyBatchesTrustedAggregatorSignature = crypto.Keccak256Hash( - []byte("VerifyBatchesTrustedAggregator(uint32,uint64,bytes32,bytes32,address)"), - ) - initL1InfoRootMapSignature = crypto.Keccak256Hash([]byte("InitL1InfoRootMap(uint32,bytes32)")) -) - -type EthClienter interface { - ethereum.LogFilterer - ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend -} - -func checkSMCIsRollupManager(rollupManagerAddr common.Address, - rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { - bridgeAddr, err := rollupManagerContract.BridgeAddress(nil) - if err != nil { 
- return fmt.Errorf("fail sanity check RollupManager(%s) Contract. Err: %w", rollupManagerAddr.String(), err) - } - log.Infof("sanity check rollupManager(%s) OK. bridgeAddr: %s", rollupManagerAddr.String(), bridgeAddr.String()) - return nil -} - -func checkSMCIsGlobalExitRoot(globalExitRootAddr common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2) error { - depositCount, err := gerContract.DepositCount(nil) - if err != nil { - return fmt.Errorf("fail sanity check GlobalExitRoot(%s) Contract. Err: %w", globalExitRootAddr.String(), err) - } - log.Infof("sanity check GlobalExitRoot(%s) OK. DepositCount: %v", globalExitRootAddr.String(), depositCount) - return nil -} - -func sanityCheckContracts(globalExitRoot, rollupManager common.Address, - gerContract *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - rollupManagerContract *polygonrollupmanager.Polygonrollupmanager) error { - errGER := checkSMCIsGlobalExitRoot(globalExitRoot, gerContract) - errRollup := checkSMCIsRollupManager(rollupManager, rollupManagerContract) - if errGER != nil || errRollup != nil { - err := fmt.Errorf("sanityCheckContracts: fails sanity check contracts. 
ErrGER: %w, ErrRollup: %w", errGER, errRollup) - log.Error(err) - return err - } - return nil -} - -func createContracts(client EthClienter, globalExitRoot, rollupManager common.Address) ( - *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - *polygonrollupmanager.Polygonrollupmanager, - error) { - gerContract, err := polygonzkevmglobalexitrootv2.NewPolygonzkevmglobalexitrootv2(globalExitRoot, client) - if err != nil { - return nil, nil, err - } - - rollupManagerContract, err := polygonrollupmanager.NewPolygonrollupmanager(rollupManager, client) - if err != nil { - return nil, nil, err - } - return gerContract, rollupManagerContract, nil -} - -func buildAppender(client EthClienter, globalExitRoot, - rollupManager common.Address, flags CreationFlags) (sync.LogAppenderMap, error) { - ger, rm, err := createContracts(client, globalExitRoot, rollupManager) - if err != nil { - err := fmt.Errorf("buildAppender: fails contracts creation. Err:%w", err) - log.Error(err) - return nil, err - } - err = sanityCheckContracts(globalExitRoot, rollupManager, ger, rm) - if err != nil && flags&FlagAllowWrongContractsAddrs == 0 { - return nil, fmt.Errorf("buildAppender: fails sanity check contracts. 
Err:%w", err) - } - - appender := make(sync.LogAppenderMap) - appender[initL1InfoRootMapSignature] = func(b *sync.EVMBlock, l types.Log) error { - init, err := ger.ParseInitL1InfoRootMap(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using ger.ParseInitL1InfoRootMap: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{InitL1InfoRootMap: &InitL1InfoRootMap{ - LeafCount: init.LeafCount, - CurrentL1InfoRoot: init.CurrentL1InfoRoot, - }}) - - return nil - } - appender[updateL1InfoTreeSignatureV1] = func(b *sync.EVMBlock, l types.Log) error { - l1InfoTreeUpdate, err := ger.ParseUpdateL1InfoTree(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using ger.ParseUpdateL1InfoTree: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{UpdateL1InfoTree: &UpdateL1InfoTree{ - BlockPosition: uint64(l.Index), - MainnetExitRoot: l1InfoTreeUpdate.MainnetExitRoot, - RollupExitRoot: l1InfoTreeUpdate.RollupExitRoot, - ParentHash: b.ParentHash, - Timestamp: b.Timestamp, - }}) - - return nil - } - - appender[updateL1InfoTreeSignatureV2] = func(b *sync.EVMBlock, l types.Log) error { - l1InfoTreeUpdateV2, err := ger.ParseUpdateL1InfoTreeV2(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using ger.ParseUpdateL1InfoTreeV2: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{UpdateL1InfoTreeV2: &UpdateL1InfoTreeV2{ - CurrentL1InfoRoot: l1InfoTreeUpdateV2.CurrentL1InfoRoot, - LeafCount: l1InfoTreeUpdateV2.LeafCount, - Blockhash: common.BytesToHash(l1InfoTreeUpdateV2.Blockhash.Bytes()), - MinTimestamp: l1InfoTreeUpdateV2.MinTimestamp, - }}) - - return nil - } - // This event is coming from RollupManager - appender[verifyBatchesSignature] = func(b *sync.EVMBlock, l types.Log) error { - verifyBatches, err := rm.ParseVerifyBatches(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using rm.ParseVerifyBatches: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ - 
BlockPosition: uint64(l.Index), - RollupID: verifyBatches.RollupID, - NumBatch: verifyBatches.NumBatch, - StateRoot: verifyBatches.StateRoot, - ExitRoot: verifyBatches.ExitRoot, - Aggregator: verifyBatches.Aggregator, - }}) - - return nil - } - appender[verifyBatchesTrustedAggregatorSignature] = func(b *sync.EVMBlock, l types.Log) error { - verifyBatches, err := rm.ParseVerifyBatchesTrustedAggregator(l) - if err != nil { - return fmt.Errorf( - "error parsing log %+v using rm.ParseVerifyBatches: %w", - l, err, - ) - } - b.Events = append(b.Events, Event{VerifyBatches: &VerifyBatches{ - BlockPosition: uint64(l.Index), - RollupID: verifyBatches.RollupID, - NumBatch: verifyBatches.NumBatch, - StateRoot: verifyBatches.StateRoot, - ExitRoot: verifyBatches.ExitRoot, - Aggregator: verifyBatches.Aggregator, - }}) - - return nil - } - - return appender, nil -} diff --git a/l1infotreesync/downloader_test.go b/l1infotreesync/downloader_test.go deleted file mode 100644 index 9d79b9a6c..000000000 --- a/l1infotreesync/downloader_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package l1infotreesync - -import ( - "fmt" - "math/big" - "strings" - "testing" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmglobalexitrootv2" - mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestBuildAppenderErrorOnBadContractAddr(t *testing.T) { - l1Client := mocks_l1infotreesync.NewEthClienter(t) - globalExitRoot := common.HexToAddress("0x1") - rollupManager := common.HexToAddress("0x2") - l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) - flags := FlagNone - _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) - require.Error(t, err) -} - -func TestBuildAppenderBypassBadContractAddr(t 
*testing.T) { - l1Client := mocks_l1infotreesync.NewEthClienter(t) - globalExitRoot := common.HexToAddress("0x1") - rollupManager := common.HexToAddress("0x2") - l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("test-error")) - flags := FlagAllowWrongContractsAddrs - _, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) - require.NoError(t, err) -} - -func TestBuildAppenderVerifiedContractAddr(t *testing.T) { - l1Client := mocks_l1infotreesync.NewEthClienter(t) - globalExitRoot := common.HexToAddress("0x1") - rollupManager := common.HexToAddress("0x2") - - smcAbi, err := abi.JSON(strings.NewReader(polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2ABI)) - require.NoError(t, err) - bigInt := big.NewInt(1) - returnGER, err := smcAbi.Methods["depositCount"].Outputs.Pack(bigInt) - require.NoError(t, err) - l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnGER, nil).Once() - v := common.HexToAddress("0x1234") - returnRM, err := smcAbi.Methods["bridgeAddress"].Outputs.Pack(v) - require.NoError(t, err) - l1Client.EXPECT().CallContract(mock.Anything, mock.Anything, mock.Anything).Return(returnRM, nil).Once() - flags := FlagNone - _, err = buildAppender(l1Client, globalExitRoot, rollupManager, flags) - require.NoError(t, err) -} diff --git a/l1infotreesync/e2e_test.go b/l1infotreesync/e2e_test.go deleted file mode 100644 index ea3db7ca9..000000000 --- a/l1infotreesync/e2e_test.go +++ /dev/null @@ -1,345 +0,0 @@ -package l1infotreesync_test - -import ( - "context" - "fmt" - "math/big" - "path" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmglobalexitrootv2" - cdktypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - mocks_l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync/mocks" - 
"github.com/0xPolygon/cdk/reorgdetector" - "github.com/0xPolygon/cdk/test/contracts/verifybatchesmock" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func newSimulatedClient(t *testing.T) ( - *simulated.Backend, - *bind.TransactOpts, - common.Address, - common.Address, - *polygonzkevmglobalexitrootv2.Polygonzkevmglobalexitrootv2, - *verifybatchesmock.Verifybatchesmock, -) { - t.Helper() - ctx := context.Background() - - deployerAuth, err := helpers.CreateAccount(big.NewInt(1337)) - require.NoError(t, err) - - client, setup := helpers.NewSimulatedBackend(t, nil, deployerAuth) - - nonce, err := client.Client().PendingNonceAt(ctx, setup.UserAuth.From) - require.NoError(t, err) - - precalculatedGERAddr := crypto.CreateAddress(setup.UserAuth.From, nonce+1) - verifyAddr, _, verifyContract, err := verifybatchesmock.DeployVerifybatchesmock(setup.UserAuth, client.Client(), precalculatedGERAddr) - require.NoError(t, err) - client.Commit() - - gerAddr, _, gerContract, err := polygonzkevmglobalexitrootv2.DeployPolygonzkevmglobalexitrootv2(setup.UserAuth, client.Client(), verifyAddr, setup.UserAuth.From) - require.NoError(t, err) - require.Equal(t, precalculatedGERAddr, gerAddr) - client.Commit() - - err = setup.DeployBridge(client, gerAddr, 0) - require.NoError(t, err) - - return client, setup.UserAuth, gerAddr, verifyAddr, gerContract, verifyContract -} - -func TestE2E(t *testing.T) { - ctx, cancelCtx := context.WithCancel(context.Background()) - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestE2E.sqlite") - - rdm := mocks_l1infotreesync.NewReorgDetectorMock(t) - rdm.On("Subscribe", mock.Anything).Return(&reorgdetector.Subscription{}, nil) - 
rdm.On("AddBlockToTrack", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) - - client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) - syncer, err := l1infotreesync.New(ctx, dbPath, gerAddr, verifyAddr, 10, etherman.LatestBlock, rdm, client.Client(), time.Millisecond, 0, 100*time.Millisecond, 25, - l1infotreesync.FlagAllowWrongContractsAddrs, etherman.SafeBlock) - require.NoError(t, err) - - go syncer.Start(ctx) - - // Update GER 3 times - for i := 0; i < 3; i++ { - tx, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - client.Commit() - g, err := gerSc.L1InfoRootMap(nil, uint32(i+1)) - require.NoError(t, err) - receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) - // Let the processor catch up - helpers.RequireProcessorUpdated(t, syncer, receipt.BlockNumber.Uint64()) - - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("index: %d", i)) - require.Equal(t, receipt.BlockNumber.Uint64(), info.BlockNumber) - - expectedRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - require.Equal(t, g, expectedRoot) - actualRoot, err := syncer.GetL1InfoTreeRootByIndex(ctx, uint32(i)) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRoot), actualRoot.Hash) - } - - // Restart syncer - cancelCtx() - ctx = context.Background() - go syncer.Start(ctx) - - // Update 3 rollups (verify batches event) 3 times - for rollupID := uint32(1); rollupID < 3; rollupID++ { - for i := 0; i < 3; i++ { - newLocalExitRoot := common.HexToHash(strconv.Itoa(int(rollupID)) + "ffff" + strconv.Itoa(i)) - tx, err := verifySC.VerifyBatches(auth, rollupID, 0, 
newLocalExitRoot, common.Hash{}, i%2 != 0) - require.NoError(t, err) - client.Commit() - receipt, err := client.Client().TransactionReceipt(ctx, tx.Hash()) - require.NoError(t, err) - require.Equal(t, receipt.Status, types.ReceiptStatusSuccessful) - require.True(t, len(receipt.Logs) == 1+i%2+i%2) - - // Let the processor catch - helpers.RequireProcessorUpdated(t, syncer, receipt.BlockNumber.Uint64()) - - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash, fmt.Sprintf("rollupID: %d, i: %d", rollupID, i)) - - // Assert verify batches - expectedVerify := l1infotreesync.VerifyBatches{ - BlockNumber: receipt.BlockNumber.Uint64(), - BlockPosition: uint64(i%2 + i%2), - RollupID: rollupID, - ExitRoot: newLocalExitRoot, - Aggregator: auth.From, - RollupExitRoot: expectedRollupExitRoot, - } - actualVerify, err := syncer.GetLastVerifiedBatches(rollupID) - require.NoError(t, err) - require.Equal(t, expectedVerify, *actualVerify) - } - } -} - -func TestWithReorgs(t *testing.T) { - ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_sync.sqlite") - dbPathReorg := path.Join(t.TempDir(), "l1infotreesyncTestWithReorgs_reorg.sqlite") - - client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) - - rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 30)}, reorgdetector.L1) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 25, - l1infotreesync.FlagAllowWrongContractsAddrs, etherman.SafeBlock) - 
require.NoError(t, err) - go syncer.Start(ctx) - - // Commit block - header, err := client.Client().HeaderByHash(ctx, client.Commit()) // Block 3 - require.NoError(t, err) - reorgFrom := header.Hash() - fmt.Println("start from header:", header.Number) - - updateL1InfoTreeAndRollupExitTree := func(i int, rollupID uint32) { - // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - - // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(1)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) - - // Update Rollup Exit Tree - newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(2)) - _, err = verifySC.VerifyBatchesTrustedAggregator(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) - require.NoError(t, err) - } - - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(1, 1) - - // Block 4 - helpers.CommitBlocks(t, client, 1, time.Second*5) - - // Make sure syncer is up to date - waitForSyncerToCatchUp(ctx, t, syncer, client) - - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - - // Assert L1 Info tree root - expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualL1InfoRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, actualL1InfoRoot.Index) - require.NoError(t, err) - - require.Equal(t, 
common.Hash(expectedL1InfoRoot), actualL1InfoRoot.Hash) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) - - // Forking from block 3 - err = client.Fork(reorgFrom) - require.NoError(t, err) - - // Block 4, 5, 6 after the fork - helpers.CommitBlocks(t, client, 3, time.Millisecond*500) - - // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.ErrorContains(t, err, "not found") // rollup exit tree reorged, it does not have any exits in it - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - - // Forking from block 3 again - err = client.Fork(reorgFrom) - require.NoError(t, err) - time.Sleep(time.Millisecond * 500) - - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) - - // create some events and update the trees - updateL1InfoTreeAndRollupExitTree(2, 1) - - helpers.CommitBlocks(t, client, 1, time.Millisecond*100) - - // Make sure syncer is up to date - waitForSyncerToCatchUp(ctx, t, syncer, client) - - // Assert rollup exit root after the fork - should be zero since there are no events in the block after the fork - expectedRollupExitRoot, err = verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err = syncer.GetLastRollupExitRoot(ctx) - require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) -} - -func TestStressAndReorgs(t *testing.T) { - const ( - totalIterations = 3 - blocksInIteration = 140 - reorgEveryXIterations = 70 - reorgSizeInBlocks = 2 - maxRollupID = 31 - extraBlocksToMine = 10 - ) - - ctx := context.Background() - dbPathSyncer := path.Join(t.TempDir(), "l1infotreesyncTestStressAndReorgs_sync.sqlite") - dbPathReorg := 
path.Join(t.TempDir(), "l1infotreesyncTestStressAndReorgs_reorg.sqlite") - - client, auth, gerAddr, verifyAddr, gerSc, verifySC := newSimulatedClient(t) - - rd, err := reorgdetector.New(client.Client(), reorgdetector.Config{DBPath: dbPathReorg, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, reorgdetector.L1) - require.NoError(t, err) - require.NoError(t, rd.Start(ctx)) - - syncer, err := l1infotreesync.New(ctx, dbPathSyncer, gerAddr, verifyAddr, 10, etherman.LatestBlock, rd, client.Client(), time.Millisecond, 0, time.Second, 100, - l1infotreesync.FlagAllowWrongContractsAddrs, etherman.SafeBlock) - require.NoError(t, err) - go syncer.Start(ctx) - - updateL1InfoTreeAndRollupExitTree := func(i, j int, rollupID uint32) { - // Update L1 Info Tree - _, err := gerSc.UpdateExitRoot(auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - - // Update L1 Info Tree + Rollup Exit Tree - newLocalExitRoot := common.HexToHash(strconv.Itoa(i) + "ffff" + strconv.Itoa(j)) - _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, true) - require.NoError(t, err) - - // Update Rollup Exit Tree - newLocalExitRoot = common.HexToHash(strconv.Itoa(i) + "fffa" + strconv.Itoa(j)) - _, err = verifySC.VerifyBatches(auth, rollupID, 0, newLocalExitRoot, common.Hash{}, false) - require.NoError(t, err) - } - - for i := 1; i <= totalIterations; i++ { - for j := 1; j <= blocksInIteration; j++ { - helpers.CommitBlocks(t, client, 1, time.Millisecond*10) - if j%reorgEveryXIterations == 0 { - helpers.Reorg(t, client, reorgSizeInBlocks) - } else { - updateL1InfoTreeAndRollupExitTree(i, j, uint32(j%maxRollupID)+1) - } - } - } - - helpers.CommitBlocks(t, client, 11, time.Millisecond*10) - - waitForSyncerToCatchUp(ctx, t, syncer, client) - - // Assert rollup exit root - expectedRollupExitRoot, err := verifySC.GetRollupExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - actualRollupExitRoot, err := syncer.GetLastRollupExitRoot(ctx) 
- require.NoError(t, err) - require.Equal(t, common.Hash(expectedRollupExitRoot), actualRollupExitRoot.Hash) - - // Assert L1 Info tree root - expectedL1InfoRoot, err := gerSc.GetRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - expectedGER, err := gerSc.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - lastRoot, err := syncer.GetLastL1InfoTreeRoot(ctx) - require.NoError(t, err) - info, err := syncer.GetInfoByIndex(ctx, lastRoot.Index) - require.NoError(t, err, fmt.Sprintf("index: %d", lastRoot.Index)) - - t.Logf("expectedL1InfoRoot: %s", common.Hash(expectedL1InfoRoot).String()) - require.Equal(t, common.Hash(expectedGER), info.GlobalExitRoot, fmt.Sprintf("%+v", info)) - require.Equal(t, common.Hash(expectedL1InfoRoot), lastRoot.Hash) -} - -func waitForSyncerToCatchUp(ctx context.Context, t *testing.T, syncer *l1infotreesync.L1InfoTreeSync, client *simulated.Backend) { - t.Helper() - for { - lastBlockNum, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - helpers.RequireProcessorUpdated(t, syncer, lastBlockNum) - time.Sleep(time.Second / 2) - lastBlockNum2, err := client.Client().BlockNumber(ctx) - require.NoError(t, err) - if lastBlockNum == lastBlockNum2 { - return - } - } -} diff --git a/l1infotreesync/l1infotreesync.go b/l1infotreesync/l1infotreesync.go deleted file mode 100644 index df685d520..000000000 --- a/l1infotreesync/l1infotreesync.go +++ /dev/null @@ -1,278 +0,0 @@ -package l1infotreesync - -import ( - "context" - "errors" - "time" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -type CreationFlags uint64 - -const ( - reorgDetectorID = "l1infotreesync" - downloadBufferSize = 1000 - // CreationFlags defitinion - FlagNone CreationFlags = 0 - FlagAllowWrongContractsAddrs CreationFlags = 1 << iota // Allow to set 
wrong contracts addresses -) - -var ( - ErrNotFound = errors.New("l1infotreesync: not found") -) - -type L1InfoTreeSync struct { - processor *processor - driver *sync.EVMDriver -} - -// New creates a L1 Info tree syncer that syncs the L1 info tree -// and the rollup exit tree -func New( - ctx context.Context, - dbPath string, - globalExitRoot, rollupManager common.Address, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, - rd sync.ReorgDetector, - l1Client EthClienter, - waitForNewBlocksPeriod time.Duration, - initialBlock uint64, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - flags CreationFlags, - finalizedBlockType etherman.BlockNumberFinality, -) (*L1InfoTreeSync, error) { - processor, err := newProcessor(dbPath) - if err != nil { - return nil, err - } - // TODO: get the initialBlock from L1 to simplify config - lastProcessedBlock, err := processor.GetLastProcessedBlock(ctx) - if err != nil { - return nil, err - } - if initialBlock > 0 && lastProcessedBlock < initialBlock-1 { - err = processor.ProcessBlock(ctx, sync.Block{ - Num: initialBlock - 1, - }) - if err != nil { - return nil, err - } - } - rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: retryAfterErrorPeriod, - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - } - - appender, err := buildAppender(l1Client, globalExitRoot, rollupManager, flags) - if err != nil { - return nil, err - } - downloader, err := sync.NewEVMDownloader( - "l1infotreesync", - l1Client, - syncBlockChunkSize, - blockFinalityType, - waitForNewBlocksPeriod, - appender, - []common.Address{globalExitRoot, rollupManager}, - rh, - finalizedBlockType, - ) - if err != nil { - return nil, err - } - - driver, err := sync.NewEVMDriver(rd, processor, downloader, reorgDetectorID, downloadBufferSize, rh) - if err != nil { - return nil, err - } - - return &L1InfoTreeSync{ - processor: processor, - driver: driver, - }, nil -} - -// Start starts the synchronization process -func (s 
*L1InfoTreeSync) Start(ctx context.Context) { - s.driver.Sync(ctx) -} - -// GetL1InfoTreeMerkleProof creates a merkle proof for the L1 Info tree -func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProof(ctx context.Context, index uint32) (types.Proof, types.Root, error) { - if s.processor.isHalted() { - return types.Proof{}, types.Root{}, sync.ErrInconsistentState - } - return s.processor.GetL1InfoTreeMerkleProof(ctx, index) -} - -// GetRollupExitTreeMerkleProof creates a merkle proof for the rollup exit tree -func (s *L1InfoTreeSync) GetRollupExitTreeMerkleProof( - ctx context.Context, - networkID uint32, - root common.Hash, -) (types.Proof, error) { - if s.processor.isHalted() { - return types.Proof{}, sync.ErrInconsistentState - } - if networkID == 0 { - return tree.EmptyProof, nil - } - - return s.processor.rollupExitTree.GetProof(ctx, networkID-1, root) -} - -func translateError(err error) error { - if errors.Is(err, db.ErrNotFound) { - return ErrNotFound - } - return err -} - -// GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. 
-// If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned -// It can returns next errors: -// - ErrBlockNotProcessed, -// - ErrNotFound -func (s *L1InfoTreeSync) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - leaf, err := s.processor.GetLatestInfoUntilBlock(ctx, blockNum) - return leaf, translateError(err) -} - -// GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree -func (s *L1InfoTreeSync) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetInfoByIndex(ctx, index) -} - -// GetL1InfoTreeRootByIndex returns the root of the L1 info tree at the moment the leaf with the given index was added -func (s *L1InfoTreeSync) GetL1InfoTreeRootByIndex(ctx context.Context, index uint32) (types.Root, error) { - if s.processor.isHalted() { - return types.Root{}, sync.ErrInconsistentState - } - return s.processor.l1InfoTree.GetRootByIndex(ctx, index) -} - -// GetLastRollupExitRoot return the last rollup exit root processed -func (s *L1InfoTreeSync) GetLastRollupExitRoot(ctx context.Context) (types.Root, error) { - if s.processor.isHalted() { - return types.Root{}, sync.ErrInconsistentState - } - return s.processor.rollupExitTree.GetLastRoot(nil) -} - -// GetLastL1InfoTreeRoot return the last root and index processed from the L1 Info tree -func (s *L1InfoTreeSync) GetLastL1InfoTreeRoot(ctx context.Context) (types.Root, error) { - if s.processor.isHalted() { - return types.Root{}, sync.ErrInconsistentState - } - return s.processor.l1InfoTree.GetLastRoot(nil) -} - -// GetLastProcessedBlock return the last processed block -func (s *L1InfoTreeSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - if s.processor.isHalted() { - return 0, sync.ErrInconsistentState - } - return 
s.processor.GetLastProcessedBlock(ctx) -} - -func (s *L1InfoTreeSync) GetLocalExitRoot( - ctx context.Context, networkID uint32, rollupExitRoot common.Hash, -) (common.Hash, error) { - if s.processor.isHalted() { - return common.Hash{}, sync.ErrInconsistentState - } - if networkID == 0 { - return common.Hash{}, errors.New("network 0 is not a rollup, and it's not part of the rollup exit tree") - } - - return s.processor.rollupExitTree.GetLeaf(nil, networkID-1, rollupExitRoot) -} - -func (s *L1InfoTreeSync) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetLastVerifiedBatches(rollupID) -} - -func (s *L1InfoTreeSync) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetFirstVerifiedBatches(rollupID) -} - -func (s *L1InfoTreeSync) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetFirstVerifiedBatchesAfterBlock(rollupID, blockNum) -} - -func (s *L1InfoTreeSync) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetFirstL1InfoWithRollupExitRoot(rollupExitRoot) -} - -func (s *L1InfoTreeSync) GetLastInfo() (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetLastInfo() -} - -func (s *L1InfoTreeSync) GetFirstInfo() (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetFirstInfo() -} - -func (s *L1InfoTreeSync) GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, 
sync.ErrInconsistentState - } - return s.processor.GetFirstInfoAfterBlock(blockNum) -} - -func (s *L1InfoTreeSync) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetInfoByGlobalExitRoot(ger) -} - -// GetL1InfoTreeMerkleProofFromIndexToRoot creates a merkle proof for the L1 Info tree -func (s *L1InfoTreeSync) GetL1InfoTreeMerkleProofFromIndexToRoot( - ctx context.Context, index uint32, root common.Hash, -) (types.Proof, error) { - if s.processor.isHalted() { - return types.Proof{}, sync.ErrInconsistentState - } - return s.processor.l1InfoTree.GetProof(ctx, index, root) -} - -// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set -func (s *L1InfoTreeSync) GetInitL1InfoRootMap(ctx context.Context) (*L1InfoTreeInitial, error) { - if s.processor.isHalted() { - return nil, sync.ErrInconsistentState - } - return s.processor.GetInitL1InfoRootMap(nil) -} diff --git a/l1infotreesync/l1infotreesync_test.go b/l1infotreesync/l1infotreesync_test.go deleted file mode 100644 index a6c5ef036..000000000 --- a/l1infotreesync/l1infotreesync_test.go +++ /dev/null @@ -1,198 +0,0 @@ -package l1infotreesync - -import ( - "context" - "errors" - "testing" - - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestGetL1InfoTreeMerkleProof(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, _, err := s.GetL1InfoTreeMerkleProof(context.Background(), 0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetRollupExitTreeMerkleProof(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetRollupExitTreeMerkleProof(context.Background(), 0, common.Hash{}) - require.Error(t, err) - require.True(t, errors.Is(err, 
sync.ErrInconsistentState)) -} - -func TestGetLatestInfoUntilBlock(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLatestInfoUntilBlock(context.Background(), 0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetInfoByIndex(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetInfoByIndex(context.Background(), 0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetL1InfoTreeRootByIndex(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetL1InfoTreeRootByIndex(context.Background(), 0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLastRollupExitRoot(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLastRollupExitRoot(context.Background()) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLastL1InfoTreeRoot(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLastL1InfoTreeRoot(context.Background()) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLastProcessedBlock(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLastProcessedBlock(context.Background()) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLocalExitRoot(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLocalExitRoot(context.Background(), 0, common.Hash{}) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLastVerifiedBatches(t *testing.T) { - s := 
L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLastVerifiedBatches(0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetFirstVerifiedBatches(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetFirstVerifiedBatches(0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetFirstVerifiedBatchesAfterBlock(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetFirstVerifiedBatchesAfterBlock(0, 0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetFirstL1InfoWithRollupExitRoot(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetLastInfo(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetLastInfo() - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetFirstInfo(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetFirstInfo() - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetFirstInfoAfterBlock(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetFirstInfoAfterBlock(0) - require.Error(t, err) - require.True(t, errors.Is(err, sync.ErrInconsistentState)) -} - -func TestGetL1InfoTreeMerkleProofFromIndexToRoot(t *testing.T) { - s := L1InfoTreeSync{ - processor: &processor{ - halted: true, - }, - } - _, err := s.GetL1InfoTreeMerkleProofFromIndexToRoot(context.Background(), 0, common.Hash{}) - require.Error(t, err) - require.True(t, 
errors.Is(err, sync.ErrInconsistentState)) -} diff --git a/l1infotreesync/migrations/l1infotreesync0001.sql b/l1infotreesync/migrations/l1infotreesync0001.sql deleted file mode 100644 index 7a6892812..000000000 --- a/l1infotreesync/migrations/l1infotreesync0001.sql +++ /dev/null @@ -1,34 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS block; -DROP TABLE IF EXISTS claim; -DROP TABLE IF EXISTS bridge; - --- +migrate Up -CREATE TABLE block ( - num BIGINT PRIMARY KEY -); - -CREATE TABLE l1info_leaf ( - block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - position INTEGER NOT NULL, - previous_block_hash VARCHAR NOT NULL, - timestamp INTEGER NOT NULL, - mainnet_exit_root VARCHAR NOT NULL, - rollup_exit_root VARCHAR NOT NULL, - global_exit_root VARCHAR NOT NULL UNIQUE, - hash VARCHAR NOT NULL, - PRIMARY KEY (block_num, block_pos) -); - -CREATE TABLE verify_batches ( - block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, - block_pos INTEGER NOT NULL, - rollup_id INTEGER NOT NULL, - batch_num INTEGER NOT NULL, - state_root VARCHAR NOT NULL, - exit_root VARCHAR NOT NULL, - aggregator VARCHAR NOT NULL, - rollup_exit_root VARCHAR NOT NULL, - PRIMARY KEY (block_num, block_pos) -); diff --git a/l1infotreesync/migrations/l1infotreesync0002.sql b/l1infotreesync/migrations/l1infotreesync0002.sql deleted file mode 100644 index d1f09481b..000000000 --- a/l1infotreesync/migrations/l1infotreesync0002.sql +++ /dev/null @@ -1,14 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS l1info_initial; - --- +migrate Up - -CREATE TABLE l1info_initial ( - -- single_row_id prevent to have more than 1 row in this table - single_row_id INTEGER check(single_row_id=1) NOT NULL DEFAULT 1, - block_num INTEGER NOT NULL REFERENCES block(num) ON DELETE CASCADE, - leaf_count INTEGER NOT NULL, - l1_info_root VARCHAR NOT NULL, - PRIMARY KEY (single_row_id) -); - diff --git a/l1infotreesync/migrations/l1infotreesync0003.sql 
b/l1infotreesync/migrations/l1infotreesync0003.sql deleted file mode 100644 index 0453081d7..000000000 --- a/l1infotreesync/migrations/l1infotreesync0003.sql +++ /dev/null @@ -1,5 +0,0 @@ --- +migrate Down -ALTER TABLE block DROP COLUMN hash; - --- +migrate Up -ALTER TABLE block ADD COLUMN hash VARCHAR; \ No newline at end of file diff --git a/l1infotreesync/migrations/migrations.go b/l1infotreesync/migrations/migrations.go deleted file mode 100644 index 6de760147..000000000 --- a/l1infotreesync/migrations/migrations.go +++ /dev/null @@ -1,53 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" - treeMigrations "github.com/0xPolygon/cdk/tree/migrations" -) - -const ( - RollupExitTreePrefix = "rollup_exit_" - L1InfoTreePrefix = "l1_info_" -) - -//go:embed l1infotreesync0001.sql -var mig001 string - -//go:embed l1infotreesync0002.sql -var mig002 string - -//go:embed l1infotreesync0003.sql -var mig003 string - -func RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "l1infotreesync0001", - SQL: mig001, - }, - { - ID: "l1infotreesync0002", - SQL: mig002, - }, - { - ID: "l1infotreesync0003", - SQL: mig003, - }, - } - for _, tm := range treeMigrations.Migrations { - migrations = append(migrations, types.Migration{ - ID: tm.ID, - SQL: tm.SQL, - Prefix: RollupExitTreePrefix, - }) - migrations = append(migrations, types.Migration{ - ID: tm.ID, - SQL: tm.SQL, - Prefix: L1InfoTreePrefix, - }) - } - return db.RunMigrations(dbPath, migrations) -} diff --git a/l1infotreesync/mocks/eth_clienter.go b/l1infotreesync/mocks/eth_clienter.go deleted file mode 100644 index 270c40d92..000000000 --- a/l1infotreesync/mocks/eth_clienter.go +++ /dev/null @@ -1,1086 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks_l1infotreesync - -import ( - context "context" - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - ethereum "github.com/ethereum/go-ethereum" - - mock "github.com/stretchr/testify/mock" - - types "github.com/ethereum/go-ethereum/core/types" -) - -// EthClienter is an autogenerated mock type for the EthClienter type -type EthClienter struct { - mock.Mock -} - -type EthClienter_Expecter struct { - mock *mock.Mock -} - -func (_m *EthClienter) EXPECT() *EthClienter_Expecter { - return &EthClienter_Expecter{mock: &_m.Mock} -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *EthClienter) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHash") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' -type EthClienter_BlockByHash_Call struct { - *mock.Call -} - -// BlockByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *EthClienter_Expecter) BlockByHash(ctx interface{}, hash interface{}) *EthClienter_BlockByHash_Call { - return &EthClienter_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} -} - -func (_c *EthClienter_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_BlockByHash_Call { - _c.Call.Run(func(args 
mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *EthClienter_BlockByHash_Call { - _c.Call.Return(run) - return _c -} - -// BlockByNumber provides a mock function with given fields: ctx, number -func (_m *EthClienter) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumber") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' -type EthClienter_BlockByNumber_Call struct { - *mock.Call -} - -// BlockByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClienter_Expecter) BlockByNumber(ctx interface{}, number interface{}) *EthClienter_BlockByNumber_Call { - return &EthClienter_BlockByNumber_Call{Call: _e.mock.On("BlockByNumber", ctx, number)} -} - -func (_c *EthClienter_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_BlockByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c 
*EthClienter_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *EthClienter_BlockByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *EthClienter_BlockByNumber_Call { - _c.Call.Return(run) - return _c -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *EthClienter) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type EthClienter_BlockNumber_Call struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) BlockNumber(ctx interface{}) *EthClienter_BlockNumber_Call { - return &EthClienter_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} -} - -func (_c *EthClienter_BlockNumber_Call) Run(run func(ctx context.Context)) *EthClienter_BlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClienter_BlockNumber_Call) Return(_a0 uint64, _a1 error) *EthClienter_BlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *EthClienter_BlockNumber_Call { - _c.Call.Return(run) - return _c -} - -// CallContract provides a mock function with given fields: 
ctx, call, blockNumber -func (_m *EthClienter) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, call, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { - return rf(ctx, call, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { - r0 = rf(ctx, call, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { - r1 = rf(ctx, call, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' -type EthClienter_CallContract_Call struct { - *mock.Call -} - -// CallContract is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -// - blockNumber *big.Int -func (_e *EthClienter_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *EthClienter_CallContract_Call { - return &EthClienter_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} -} - -func (_c *EthClienter_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *EthClienter_CallContract_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_CallContract_Call) Return(_a0 []byte, _a1 error) *EthClienter_CallContract_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) 
*EthClienter_CallContract_Call { - _c.Call.Return(run) - return _c -} - -// CodeAt provides a mock function with given fields: ctx, contract, blockNumber -func (_m *EthClienter) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, contract, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { - return rf(ctx, contract, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { - r0 = rf(ctx, contract, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, contract, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' -type EthClienter_CodeAt_Call struct { - *mock.Call -} - -// CodeAt is a helper method to define mock.On call -// - ctx context.Context -// - contract common.Address -// - blockNumber *big.Int -func (_e *EthClienter_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *EthClienter_CodeAt_Call { - return &EthClienter_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} -} - -func (_c *EthClienter_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *EthClienter_CodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_CodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_CodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_CodeAt_Call) RunAndReturn(run func(context.Context, 
common.Address, *big.Int) ([]byte, error)) *EthClienter_CodeAt_Call { - _c.Call.Return(run) - return _c -} - -// EstimateGas provides a mock function with given fields: ctx, call -func (_m *EthClienter) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - ret := _m.Called(ctx, call) - - if len(ret) == 0 { - panic("no return value specified for EstimateGas") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { - return rf(ctx, call) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { - r0 = rf(ctx, call) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { - r1 = rf(ctx, call) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' -type EthClienter_EstimateGas_Call struct { - *mock.Call -} - -// EstimateGas is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -func (_e *EthClienter_Expecter) EstimateGas(ctx interface{}, call interface{}) *EthClienter_EstimateGas_Call { - return &EthClienter_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} -} - -func (_c *EthClienter_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *EthClienter_EstimateGas_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg)) - }) - return _c -} - -func (_c *EthClienter_EstimateGas_Call) Return(_a0 uint64, _a1 error) *EthClienter_EstimateGas_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *EthClienter_EstimateGas_Call { - _c.Call.Return(run) - return _c -} - -// FilterLogs provides a mock function with given fields: 
ctx, q -func (_m *EthClienter) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - ret := _m.Called(ctx, q) - - if len(ret) == 0 { - panic("no return value specified for FilterLogs") - } - - var r0 []types.Log - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { - return rf(ctx, q) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { - r0 = rf(ctx, q) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Log) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { - r1 = rf(ctx, q) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' -type EthClienter_FilterLogs_Call struct { - *mock.Call -} - -// FilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -func (_e *EthClienter_Expecter) FilterLogs(ctx interface{}, q interface{}) *EthClienter_FilterLogs_Call { - return &EthClienter_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} -} - -func (_c *EthClienter_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *EthClienter_FilterLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) - }) - return _c -} - -func (_c *EthClienter_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *EthClienter_FilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *EthClienter_FilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByHash provides a mock function with given fields: ctx, hash -func (_m *EthClienter) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - ret := 
_m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for HeaderByHash") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' -type EthClienter_HeaderByHash_Call struct { - *mock.Call -} - -// HeaderByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *EthClienter_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClienter_HeaderByHash_Call { - return &EthClienter_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} -} - -func (_c *EthClienter_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClienter_HeaderByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClienter_HeaderByHash_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *EthClienter) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - 
var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClienter_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClienter_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClienter_HeaderByNumber_Call { - return &EthClienter_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *EthClienter_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClienter_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClienter_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClienter_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClienter_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *EthClienter) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingCodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
common.Address) ([]byte, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { - r0 = rf(ctx, account) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' -type EthClienter_PendingCodeAt_Call struct { - *mock.Call -} - -// PendingCodeAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *EthClienter_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *EthClienter_PendingCodeAt_Call { - return &EthClienter_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} -} - -func (_c *EthClienter_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingCodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *EthClienter_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *EthClienter_PendingCodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *EthClienter_PendingCodeAt_Call { - _c.Call.Return(run) - return _c -} - -// PendingNonceAt provides a mock function with given fields: ctx, account -func (_m *EthClienter) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingNonceAt") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { - return rf(ctx, account) - } - if rf, ok := 
ret.Get(0).(func(context.Context, common.Address) uint64); ok { - r0 = rf(ctx, account) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' -type EthClienter_PendingNonceAt_Call struct { - *mock.Call -} - -// PendingNonceAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *EthClienter_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *EthClienter_PendingNonceAt_Call { - return &EthClienter_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} -} - -func (_c *EthClienter_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *EthClienter_PendingNonceAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *EthClienter_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *EthClienter_PendingNonceAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *EthClienter_PendingNonceAt_Call { - _c.Call.Return(run) - return _c -} - -// SendTransaction provides a mock function with given fields: ctx, tx -func (_m *EthClienter) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { - r0 = rf(ctx, tx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// EthClienter_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'SendTransaction' -type EthClienter_SendTransaction_Call struct { - *mock.Call -} - -// SendTransaction is a helper method to define mock.On call -// - ctx context.Context -// - tx *types.Transaction -func (_e *EthClienter_Expecter) SendTransaction(ctx interface{}, tx interface{}) *EthClienter_SendTransaction_Call { - return &EthClienter_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} -} - -func (_c *EthClienter_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *EthClienter_SendTransaction_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*types.Transaction)) - }) - return _c -} - -func (_c *EthClienter_SendTransaction_Call) Return(_a0 error) *EthClienter_SendTransaction_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EthClienter_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *EthClienter_SendTransaction_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *EthClienter) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { - r1 = rf(ctx, q, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
EthClienter_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' -type EthClienter_SubscribeFilterLogs_Call struct { - *mock.Call -} - -// SubscribeFilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -// - ch chan<- types.Log -func (_e *EthClienter_Expecter) SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *EthClienter_SubscribeFilterLogs_Call { - return &EthClienter_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) - }) - return _c -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *EthClienter_SubscribeFilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *EthClienter) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeNewHead") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { - r0 = rf(ctx, ch) - } else { - if ret.Get(0) != nil { - r0 
= ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { - r1 = rf(ctx, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' -type EthClienter_SubscribeNewHead_Call struct { - *mock.Call -} - -// SubscribeNewHead is a helper method to define mock.On call -// - ctx context.Context -// - ch chan<- *types.Header -func (_e *EthClienter_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClienter_SubscribeNewHead_Call { - return &EthClienter_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} -} - -func (_c *EthClienter_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClienter_SubscribeNewHead_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(chan<- *types.Header)) - }) - return _c -} - -func (_c *EthClienter_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClienter_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClienter_SubscribeNewHead_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasPrice provides a mock function with given fields: ctx -func (_m *EthClienter) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasPrice") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := 
ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' -type EthClienter_SuggestGasPrice_Call struct { - *mock.Call -} - -// SuggestGasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) SuggestGasPrice(ctx interface{}) *EthClienter_SuggestGasPrice_Call { - return &EthClienter_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} -} - -func (_c *EthClienter_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClienter_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasPrice_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasPrice_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *EthClienter) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasTipCap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' -type 
EthClienter_SuggestGasTipCap_Call struct { - *mock.Call -} - -// SuggestGasTipCap is a helper method to define mock.On call -// - ctx context.Context -func (_e *EthClienter_Expecter) SuggestGasTipCap(ctx interface{}) *EthClienter_SuggestGasTipCap_Call { - return &EthClienter_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} -} - -func (_c *EthClienter_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *EthClienter_SuggestGasTipCap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *EthClienter_SuggestGasTipCap_Call) Return(_a0 *big.Int, _a1 error) *EthClienter_SuggestGasTipCap_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *EthClienter_SuggestGasTipCap_Call { - _c.Call.Return(run) - return _c -} - -// TransactionCount provides a mock function with given fields: ctx, blockHash -func (_m *EthClienter) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { - ret := _m.Called(ctx, blockHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionCount") - } - - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { - return rf(ctx, blockHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { - r0 = rf(ctx, blockHash) - } else { - r0 = ret.Get(0).(uint) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, blockHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' -type EthClienter_TransactionCount_Call struct { - *mock.Call -} - -// TransactionCount is a helper method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -func (_e 
*EthClienter_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *EthClienter_TransactionCount_Call { - return &EthClienter_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} -} - -func (_c *EthClienter_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *EthClienter_TransactionCount_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClienter_TransactionCount_Call) Return(_a0 uint, _a1 error) *EthClienter_TransactionCount_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_TransactionCount_Call) RunAndReturn(run func(context.Context, common.Hash) (uint, error)) *EthClienter_TransactionCount_Call { - _c.Call.Return(run) - return _c -} - -// TransactionInBlock provides a mock function with given fields: ctx, blockHash, index -func (_m *EthClienter) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { - ret := _m.Called(ctx, blockHash, index) - - if len(ret) == 0 { - panic("no return value specified for TransactionInBlock") - } - - var r0 *types.Transaction - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { - return rf(ctx, blockHash, index) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { - r0 = rf(ctx, blockHash, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Transaction) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { - r1 = rf(ctx, blockHash, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClienter_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' -type EthClienter_TransactionInBlock_Call struct { - *mock.Call -} - -// TransactionInBlock is a helper 
method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -// - index uint -func (_e *EthClienter_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *EthClienter_TransactionInBlock_Call { - return &EthClienter_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} -} - -func (_c *EthClienter_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *EthClienter_TransactionInBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) - }) - return _c -} - -func (_c *EthClienter_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) *EthClienter_TransactionInBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClienter_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *EthClienter_TransactionInBlock_Call { - _c.Call.Return(run) - return _c -} - -// NewEthClienter creates a new instance of EthClienter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewEthClienter(t interface { - mock.TestingT - Cleanup(func()) -}) *EthClienter { - mock := &EthClienter{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/l1infotreesync/mocks/mock_reorgdetector.go b/l1infotreesync/mocks/mock_reorgdetector.go deleted file mode 100644 index 74a8afc3a..000000000 --- a/l1infotreesync/mocks/mock_reorgdetector.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks_l1infotreesync - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - mock "github.com/stretchr/testify/mock" - - reorgdetector "github.com/0xPolygon/cdk/reorgdetector" -) - -// ReorgDetectorMock is an autogenerated mock type for the ReorgDetector type -type ReorgDetectorMock struct { - mock.Mock -} - -type ReorgDetectorMock_Expecter struct { - mock *mock.Mock -} - -func (_m *ReorgDetectorMock) EXPECT() *ReorgDetectorMock_Expecter { - return &ReorgDetectorMock_Expecter{mock: &_m.Mock} -} - -// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash -func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { - ret := _m.Called(ctx, id, blockNum, blockHash) - - if len(ret) == 0 { - panic("no return value specified for AddBlockToTrack") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { - r0 = rf(ctx, id, blockNum, blockHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ReorgDetectorMock_AddBlockToTrack_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddBlockToTrack' -type ReorgDetectorMock_AddBlockToTrack_Call struct { - *mock.Call -} - -// AddBlockToTrack is a helper method to define mock.On call -// - ctx context.Context -// - id string -// - blockNum uint64 -// - blockHash common.Hash -func (_e *ReorgDetectorMock_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetectorMock_AddBlockToTrack_Call { - return &ReorgDetectorMock_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Run(func(args mock.Arguments) { - 
run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) - }) - return _c -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, common.Hash) error) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, error) { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 *reorgdetector.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { - return rf(id) - } - if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*reorgdetector.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ReorgDetectorMock_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type ReorgDetectorMock_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *ReorgDetectorMock_Expecter) Subscribe(id interface{}) *ReorgDetectorMock_Subscribe_Call { - return &ReorgDetectorMock_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *ReorgDetectorMock_Subscribe_Call) Run(run func(id string)) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *ReorgDetectorMock_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Return(_a0, _a1) - 
return _c -} - -func (_c *ReorgDetectorMock_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewReorgDetectorMock creates a new instance of ReorgDetectorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewReorgDetectorMock(t interface { - mock.TestingT - Cleanup(func()) -}) *ReorgDetectorMock { - mock := &ReorgDetectorMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/l1infotreesync/processor.go b/l1infotreesync/processor.go deleted file mode 100644 index e0cc19599..000000000 --- a/l1infotreesync/processor.go +++ /dev/null @@ -1,489 +0,0 @@ -package l1infotreesync - -import ( - "context" - "database/sql" - "encoding/binary" - "errors" - "fmt" - mutex "sync" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/l1infotreesync/migrations" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/iden3/go-iden3-crypto/keccak256" - "github.com/russross/meddler" - "golang.org/x/crypto/sha3" -) - -var ( - ErrBlockNotProcessed = errors.New("given block(s) have not been processed yet") - ErrNoBlock0 = errors.New("blockNum must be greater than 0") -) - -type processor struct { - db *sql.DB - l1InfoTree *tree.AppendOnlyTree - rollupExitTree *tree.UpdatableTree - mu mutex.RWMutex - halted bool - haltedReason string - log *log.Logger -} - -// UpdateL1InfoTree representation of the UpdateL1InfoTree event -type UpdateL1InfoTree struct { - BlockPosition uint64 - MainnetExitRoot common.Hash - RollupExitRoot common.Hash - ParentHash common.Hash - Timestamp uint64 -} - -type UpdateL1InfoTreeV2 struct { - CurrentL1InfoRoot 
common.Hash - LeafCount uint32 - Blockhash common.Hash - MinTimestamp uint64 -} - -// VerifyBatches representation of the VerifyBatches and VerifyBatchesTrustedAggregator events -type VerifyBatches struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - RollupID uint32 `meddler:"rollup_id"` - NumBatch uint64 `meddler:"batch_num"` - StateRoot common.Hash `meddler:"state_root,hash"` - ExitRoot common.Hash `meddler:"exit_root,hash"` - Aggregator common.Address `meddler:"aggregator,address"` - - // Not provided by downloader - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` -} - -func (v *VerifyBatches) String() string { - return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, RollupID: %d, NumBatch: %d, StateRoot: %s, "+ - "ExitRoot: %s, Aggregator: %s, RollupExitRoot: %s", - v.BlockNumber, v.BlockPosition, v.RollupID, v.NumBatch, v.StateRoot.String(), - v.ExitRoot.String(), v.Aggregator.String(), v.RollupExitRoot.String()) -} - -type InitL1InfoRootMap struct { - LeafCount uint32 - CurrentL1InfoRoot common.Hash -} - -func (i *InitL1InfoRootMap) String() string { - return fmt.Sprintf("LeafCount: %d, CurrentL1InfoRoot: %s", i.LeafCount, i.CurrentL1InfoRoot.String()) -} - -type Event struct { - UpdateL1InfoTree *UpdateL1InfoTree - UpdateL1InfoTreeV2 *UpdateL1InfoTreeV2 - VerifyBatches *VerifyBatches - InitL1InfoRootMap *InitL1InfoRootMap -} - -// L1InfoTreeLeaf representation of a leaf of the L1 Info tree -type L1InfoTreeLeaf struct { - BlockNumber uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_pos"` - L1InfoTreeIndex uint32 `meddler:"position"` - PreviousBlockHash common.Hash `meddler:"previous_block_hash,hash"` - Timestamp uint64 `meddler:"timestamp"` - MainnetExitRoot common.Hash `meddler:"mainnet_exit_root,hash"` - RollupExitRoot common.Hash `meddler:"rollup_exit_root,hash"` - GlobalExitRoot common.Hash `meddler:"global_exit_root,hash"` - Hash common.Hash `meddler:"hash,hash"` -} - -func (l 
*L1InfoTreeLeaf) String() string { - return fmt.Sprintf("BlockNumber: %d, BlockPosition: %d, L1InfoTreeIndex: %d, PreviousBlockHash: %s, "+ - "Timestamp: %d, MainnetExitRoot: %s, RollupExitRoot: %s, GlobalExitRoot: %s, Hash: %s", - l.BlockNumber, l.BlockPosition, l.L1InfoTreeIndex, l.PreviousBlockHash.String(), - l.Timestamp, l.MainnetExitRoot.String(), l.RollupExitRoot.String(), l.GlobalExitRoot.String(), l.Hash.String()) -} - -// L1InfoTreeInitial representation of the initial info of the L1 Info tree for this rollup -type L1InfoTreeInitial struct { - BlockNumber uint64 `meddler:"block_num"` - LeafCount uint32 `meddler:"leaf_count"` - L1InfoRoot common.Hash `meddler:"l1_info_root,hash"` -} - -func (l *L1InfoTreeInitial) String() string { - return fmt.Sprintf("BlockNumber: %d, LeafCount: %d, L1InfoRoot: %s", l.BlockNumber, l.LeafCount, l.L1InfoRoot.String()) -} - -// Hash as expected by the tree -func (l *L1InfoTreeLeaf) hash() common.Hash { - var res [treeTypes.DefaultHeight]byte - t := make([]byte, 8) //nolint:mnd - binary.BigEndian.PutUint64(t, l.Timestamp) - copy(res[:], keccak256.Hash(l.globalExitRoot().Bytes(), l.PreviousBlockHash.Bytes(), t)) - return res -} - -// GlobalExitRoot returns the GER -func (l *L1InfoTreeLeaf) globalExitRoot() common.Hash { - var gerBytes [treeTypes.DefaultHeight]byte - hasher := sha3.NewLegacyKeccak256() - hasher.Write(l.MainnetExitRoot[:]) - hasher.Write(l.RollupExitRoot[:]) - copy(gerBytes[:], hasher.Sum(nil)) - - return gerBytes -} - -func newProcessor(dbPath string) (*processor, error) { - err := migrations.RunMigrations(dbPath) - if err != nil { - return nil, err - } - db, err := db.NewSQLiteDB(dbPath) - if err != nil { - return nil, err - } - return &processor{ - db: db, - l1InfoTree: tree.NewAppendOnlyTree(db, migrations.L1InfoTreePrefix), - rollupExitTree: tree.NewUpdatableTree(db, migrations.RollupExitTreePrefix), - log: log.WithFields("processor", "l1infotreesync"), - }, nil -} - -// GetL1InfoTreeMerkleProof creates a 
merkle proof for the L1 Info tree -func (p *processor) GetL1InfoTreeMerkleProof( - ctx context.Context, index uint32, -) (treeTypes.Proof, treeTypes.Root, error) { - root, err := p.l1InfoTree.GetRootByIndex(ctx, index) - if err != nil { - return treeTypes.Proof{}, treeTypes.Root{}, err - } - proof, err := p.l1InfoTree.GetProof(ctx, root.Index, root.Hash) - return proof, root, err -} - -// GetLatestInfoUntilBlock returns the most recent L1InfoTreeLeaf that occurred before or at blockNum. -// If the blockNum has not been processed yet the error ErrBlockNotProcessed will be returned -func (p *processor) GetLatestInfoUntilBlock(ctx context.Context, blockNum uint64) (*L1InfoTreeLeaf, error) { - if blockNum == 0 { - return nil, ErrNoBlock0 - } - tx, err := p.db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) - if err != nil { - return nil, err - } - defer func() { - if err := tx.Rollback(); err != nil { - p.log.Warnf("error rolling back tx: %v", err) - } - }() - - lpb, err := p.getLastProcessedBlockWithTx(tx) - if err != nil { - return nil, err - } - if lpb < blockNum { - return nil, ErrBlockNotProcessed - } - - info := &L1InfoTreeLeaf{} - err = meddler.QueryRow( - tx, info, - `SELECT * FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;`, - ) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, db.ErrNotFound - } - return nil, err - } - return info, nil -} - -// GetInfoByIndex returns the value of a leaf (not the hash) of the L1 info tree -func (p *processor) GetInfoByIndex(ctx context.Context, index uint32) (*L1InfoTreeLeaf, error) { - return p.getInfoByIndexWithTx(p.db, index) -} - -func (p *processor) getInfoByIndexWithTx(tx db.DBer, index uint32) (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - return info, meddler.QueryRow( - tx, info, - `SELECT * FROM l1info_leaf WHERE position = $1;`, index, - ) -} - -// GetLastProcessedBlock returns the last processed block -func (p *processor) GetLastProcessedBlock(ctx context.Context) 
(uint64, error) { - return p.getLastProcessedBlockWithTx(p.db) -} - -func (p *processor) getLastProcessedBlockWithTx(tx db.Querier) (uint64, error) { - var lastProcessedBlock uint64 - row := tx.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") - err := row.Scan(&lastProcessedBlock) - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - return lastProcessedBlock, err -} - -// Reorg triggers a purge and reset process on the processor to leaf it on a state -// as if the last block processed was firstReorgedBlock-1 -func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - p.log.Infof("reorging to block %d", firstReorgedBlock) - - tx, err := db.NewTx(ctx, p.db) - if err != nil { - return err - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRllbck := tx.Rollback(); errRllbck != nil { - p.log.Errorf("error while rolling back tx %v", errRllbck) - } - } - }() - - res, err := tx.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) - if err != nil { - return err - } - - if err = p.l1InfoTree.Reorg(tx, firstReorgedBlock); err != nil { - return err - } - - if err = p.rollupExitTree.Reorg(tx, firstReorgedBlock); err != nil { - return err - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return err - } - - if err := tx.Commit(); err != nil { - return err - } - - p.log.Infof("reorged to block %d, %d rows affected", firstReorgedBlock, rowsAffected) - - shouldRollback = false - - sync.UnhaltIfAffectedRows(&p.halted, &p.haltedReason, &p.mu, rowsAffected) - return nil -} - -// ProcessBlock process the events of the block to build the rollup exit tree and the l1 info tree -// and updates the last processed block (can be called without events for that purpose) -func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - if p.isHalted() { - p.log.Errorf("processor is halted due to: %s", p.haltedReason) - return sync.ErrInconsistentState - } - tx, err := db.NewTx(ctx, p.db) - 
if err != nil { - return err - } - p.log.Debugf("init block processing for block %d", block.Num) - shouldRollback := true - defer func() { - if shouldRollback { - p.log.Debugf("rolling back block processing for block %d", block.Num) - if errRllbck := tx.Rollback(); errRllbck != nil { - p.log.Errorf("error while rolling back tx %v", errRllbck) - } - } - }() - - if _, err := tx.Exec(`INSERT INTO block (num, hash) VALUES ($1, $2)`, block.Num, block.Hash.String()); err != nil { - return fmt.Errorf("insert Block. err: %w", err) - } - - var initialL1InfoIndex uint32 - var l1InfoLeavesAdded uint32 - lastIndex, err := p.getLastIndex(tx) - - switch { - case errors.Is(err, db.ErrNotFound): - initialL1InfoIndex = 0 - case err != nil: - return fmt.Errorf("getLastIndex err: %w", err) - default: - initialL1InfoIndex = lastIndex + 1 - } - - for _, e := range block.Events { - event, ok := e.(Event) - if !ok { - return errors.New("failed to convert from sync.Block.Event into Event") - } - if event.UpdateL1InfoTree != nil { - index := initialL1InfoIndex + l1InfoLeavesAdded - info := &L1InfoTreeLeaf{ - BlockNumber: block.Num, - BlockPosition: event.UpdateL1InfoTree.BlockPosition, - L1InfoTreeIndex: index, - PreviousBlockHash: event.UpdateL1InfoTree.ParentHash, - Timestamp: event.UpdateL1InfoTree.Timestamp, - MainnetExitRoot: event.UpdateL1InfoTree.MainnetExitRoot, - RollupExitRoot: event.UpdateL1InfoTree.RollupExitRoot, - } - info.GlobalExitRoot = info.globalExitRoot() - info.Hash = info.hash() - if err = meddler.Insert(tx, "l1info_leaf", info); err != nil { - return fmt.Errorf("insert l1info_leaf %s. err: %w", info.String(), err) - } - - err = p.l1InfoTree.AddLeaf(tx, info.BlockNumber, info.BlockPosition, treeTypes.Leaf{ - Index: info.L1InfoTreeIndex, - Hash: info.Hash, - }) - if err != nil { - return fmt.Errorf("AddLeaf(%s). 
err: %w", info.String(), err) - } - p.log.Infof("inserted L1InfoTreeLeaf %s", info.String()) - l1InfoLeavesAdded++ - } - if event.UpdateL1InfoTreeV2 != nil { - p.log.Infof("handle UpdateL1InfoTreeV2 event. Block: %d, block hash: %s. Event root: %s. Event leaf count: %d.", - block.Num, block.Hash, event.UpdateL1InfoTreeV2.CurrentL1InfoRoot.String(), event.UpdateL1InfoTreeV2.LeafCount) - - root, err := p.l1InfoTree.GetLastRoot(tx) - if err != nil { - return fmt.Errorf("GetLastRoot(). err: %w", err) - } - // If the sanity check fails, halt the syncer and rollback. The sanity check could have - // failed due to a reorg. Hopefully, this is the case, eventually the reorg will get detected, - // and the syncer will get unhalted. Otherwise, this means that the syncer has an inconsistent state - // compared to the contracts, and this will need manual intervention. - if root.Hash != event.UpdateL1InfoTreeV2.CurrentL1InfoRoot || root.Index+1 != event.UpdateL1InfoTreeV2.LeafCount { - errStr := fmt.Sprintf( - "failed to check UpdateL1InfoTreeV2. Root: %s vs event: %s. "+ - "Index: %d vs event.LeafCount: %d. Happened on block %d", - root.Hash, event.UpdateL1InfoTreeV2.CurrentL1InfoRoot.String(), - root.Index, event.UpdateL1InfoTreeV2.LeafCount, - block.Num, - ) - p.log.Error(errStr) - p.mu.Lock() - p.haltedReason = errStr - p.halted = true - p.mu.Unlock() - return sync.ErrInconsistentState - } - } - if event.VerifyBatches != nil { - p.log.Debugf("handle VerifyBatches event %s", event.VerifyBatches.String()) - err = p.processVerifyBatches(tx, block.Num, event.VerifyBatches) - if err != nil { - err = fmt.Errorf("processVerifyBatches. 
err: %w", err) - p.log.Errorf("error processing VerifyBatches: %v", err) - return err - } - } - - if event.InitL1InfoRootMap != nil { - p.log.Debugf("handle InitL1InfoRootMap event %s", event.InitL1InfoRootMap.String()) - err = processEventInitL1InfoRootMap(tx, block.Num, event.InitL1InfoRootMap) - if err != nil { - err = fmt.Errorf("initL1InfoRootMap. Err: %w", err) - p.log.Errorf("error processing InitL1InfoRootMap: %v", err) - return err - } - } - } - - if err := tx.Commit(); err != nil { - return fmt.Errorf("err: %w", err) - } - shouldRollback = false - logFunc := p.log.Debugf - if len(block.Events) > 0 { - logFunc = p.log.Infof - } - logFunc("block %d processed with %d events", block.Num, len(block.Events)) - return nil -} - -func (p *processor) getLastIndex(tx db.Querier) (uint32, error) { - var lastProcessedIndex uint32 - row := tx.QueryRow("SELECT position FROM l1info_leaf ORDER BY block_num DESC, block_pos DESC LIMIT 1;") - err := row.Scan(&lastProcessedIndex) - if errors.Is(err, sql.ErrNoRows) { - return 0, db.ErrNotFound - } - return lastProcessedIndex, err -} - -func (p *processor) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - err := meddler.QueryRow(p.db, info, ` - SELECT * FROM l1info_leaf - WHERE rollup_exit_root = $1 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, rollupExitRoot.Hex()) - return info, db.ReturnErrNotFound(err) -} - -func (p *processor) GetLastInfo() (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - err := meddler.QueryRow(p.db, info, ` - SELECT * FROM l1info_leaf - ORDER BY block_num DESC, block_pos DESC - LIMIT 1; - `) - return info, db.ReturnErrNotFound(err) -} - -func (p *processor) GetFirstInfo() (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - err := meddler.QueryRow(p.db, info, ` - SELECT * FROM l1info_leaf - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `) - return info, db.ReturnErrNotFound(err) -} - -func (p *processor) 
GetFirstInfoAfterBlock(blockNum uint64) (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - err := meddler.QueryRow(p.db, info, ` - SELECT * FROM l1info_leaf - WHERE block_num >= $1 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, blockNum) - return info, db.ReturnErrNotFound(err) -} - -func (p *processor) GetInfoByGlobalExitRoot(ger common.Hash) (*L1InfoTreeLeaf, error) { - info := &L1InfoTreeLeaf{} - err := meddler.QueryRow(p.db, info, ` - SELECT * FROM l1info_leaf - WHERE global_exit_root = $1 - LIMIT 1; - `, ger.String()) - return info, db.ReturnErrNotFound(err) -} - -func (p *processor) getDBQuerier(tx db.Txer) db.Querier { - if tx != nil { - return tx - } - return p.db -} - -func (p *processor) isHalted() bool { - p.mu.RLock() - defer p.mu.RUnlock() - return p.halted -} diff --git a/l1infotreesync/processor_initl1inforootmap.go b/l1infotreesync/processor_initl1inforootmap.go deleted file mode 100644 index 92732cd94..000000000 --- a/l1infotreesync/processor_initl1inforootmap.go +++ /dev/null @@ -1,37 +0,0 @@ -package l1infotreesync - -import ( - "database/sql" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/russross/meddler" -) - -func processEventInitL1InfoRootMap(tx db.Txer, blockNumber uint64, event *InitL1InfoRootMap) error { - if event == nil { - return nil - } - info := &L1InfoTreeInitial{ - BlockNumber: blockNumber, - LeafCount: event.LeafCount, - L1InfoRoot: event.CurrentL1InfoRoot, - } - log.Infof("insert InitL1InfoRootMap %s ", info.String()) - if err := meddler.Insert(tx, "l1info_initial", info); err != nil { - return fmt.Errorf("err: %w", err) - } - return nil -} - -// GetInitL1InfoRootMap returns the initial L1 info root map, nil if no root map has been set -func (p *processor) GetInitL1InfoRootMap(tx db.Txer) (*L1InfoTreeInitial, error) { - info := &L1InfoTreeInitial{} - err := meddler.QueryRow(p.getDBQuerier(tx), info, `SELECT block_num, leaf_count,l1_info_root FROM l1info_initial`) 
- if errors.Is(err, sql.ErrNoRows) { - return nil, nil - } - return info, err -} diff --git a/l1infotreesync/processor_initl1inforootmap_test.go b/l1infotreesync/processor_initl1inforootmap_test.go deleted file mode 100644 index 9d2284652..000000000 --- a/l1infotreesync/processor_initl1inforootmap_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package l1infotreesync - -import ( - "context" - "path" - "testing" - - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestInitL1InfoRootMap(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestInitL1InfoRootMap.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.TODO() - event := InitL1InfoRootMap{ - LeafCount: 1, - CurrentL1InfoRoot: common.HexToHash("beef"), - } - block := sync.Block{ - Num: 1, - Events: []interface{}{ - Event{InitL1InfoRootMap: &event}, - }, - } - - err = sut.ProcessBlock(ctx, block) - require.NoError(t, err) - - info, err := sut.GetInitL1InfoRootMap(nil) - require.NoError(t, err) - require.NotNil(t, info) - require.Equal(t, event.LeafCount, info.LeafCount) - require.Equal(t, event.CurrentL1InfoRoot, info.L1InfoRoot) - require.Equal(t, block.Num, info.BlockNumber) -} - -func TestInitL1InfoRootMapDontAllow2Rows(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestInitL1InfoRootMapDontAllow2Rows.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.TODO() - block := sync.Block{ - Num: 1, - Events: []interface{}{ - Event{InitL1InfoRootMap: &InitL1InfoRootMap{ - LeafCount: 1, - CurrentL1InfoRoot: common.HexToHash("beef"), - }}, - }, - } - err = sut.ProcessBlock(ctx, block) - require.NoError(t, err) - block.Num = 2 - err = sut.ProcessBlock(ctx, block) - require.Error(t, err, "should not allow to insert a second row") -} - -func TestGetInitL1InfoRootMap(t *testing.T) { - dbPath := path.Join(t.TempDir(), 
"l1infotreesyncTestGetInitL1InfoRootMap.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - info, err := sut.GetInitL1InfoRootMap(nil) - require.NoError(t, err, "should return no error if no row is present, because it returns data=nil") - require.Nil(t, info, "should return nil if no row is present") -} diff --git a/l1infotreesync/processor_test.go b/l1infotreesync/processor_test.go deleted file mode 100644 index df0b8444e..000000000 --- a/l1infotreesync/processor_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package l1infotreesync - -import ( - "fmt" - "path" - "testing" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/l1infotree" - "github.com/0xPolygon/cdk/l1infotreesync/migrations" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/0xPolygon/cdk/tree" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" -) - -func TestGetInfo(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetInfo.sqlite") - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - - // Test ErrNotFound returned correctly on all methods - _, err = p.GetFirstL1InfoWithRollupExitRoot(common.Hash{}) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetLastInfo() - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstInfo() - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstInfoAfterBlock(0) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetInfoByGlobalExitRoot(common.Hash{}) - require.Equal(t, db.ErrNotFound, err) - - // First insert - info1 := &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("beef"), - RollupExitRoot: common.HexToHash("5ca1e"), - ParentHash: common.HexToHash("1010101"), - Timestamp: 420, - } - expected1 := L1InfoTreeLeaf{ - BlockNumber: 1, - L1InfoTreeIndex: 0, - PreviousBlockHash: info1.ParentHash, - Timestamp: info1.Timestamp, - 
MainnetExitRoot: info1.MainnetExitRoot, - RollupExitRoot: info1.RollupExitRoot, - } - expected1.GlobalExitRoot = expected1.globalExitRoot() - expected1.Hash = expected1.hash() - err = p.ProcessBlock(ctx, sync.Block{ - Num: 1, - Events: []interface{}{ - Event{UpdateL1InfoTree: info1}, - }, - }) - require.NoError(t, err) - actual, err := p.GetFirstL1InfoWithRollupExitRoot(info1.RollupExitRoot) - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetLastInfo() - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetFirstInfo() - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetFirstInfoAfterBlock(0) - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetInfoByGlobalExitRoot(expected1.GlobalExitRoot) - require.NoError(t, err) - require.Equal(t, expected1, *actual) - - // Second insert - info2 := &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("b055"), - RollupExitRoot: common.HexToHash("5ca1e"), - ParentHash: common.HexToHash("1010101"), - Timestamp: 420, - } - expected2 := L1InfoTreeLeaf{ - BlockNumber: 2, - L1InfoTreeIndex: 1, - PreviousBlockHash: info2.ParentHash, - Timestamp: info2.Timestamp, - MainnetExitRoot: info2.MainnetExitRoot, - RollupExitRoot: info2.RollupExitRoot, - } - expected2.GlobalExitRoot = expected2.globalExitRoot() - expected2.Hash = expected2.hash() - err = p.ProcessBlock(ctx, sync.Block{ - Num: 2, - Events: []interface{}{ - Event{UpdateL1InfoTree: info2}, - }, - }) - require.NoError(t, err) - actual, err = p.GetFirstL1InfoWithRollupExitRoot(info2.RollupExitRoot) - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetLastInfo() - require.NoError(t, err) - require.Equal(t, expected2, *actual) - actual, err = p.GetFirstInfo() - require.NoError(t, err) - require.Equal(t, expected1, *actual) - actual, err = p.GetFirstInfoAfterBlock(2) - require.NoError(t, err) - require.Equal(t, expected2, 
*actual) - actual, err = p.GetInfoByGlobalExitRoot(expected2.GlobalExitRoot) - require.NoError(t, err) - require.Equal(t, expected2, *actual) -} - -func TestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetLatestInfoUntilBlockIfNotFoundReturnsErrNotFound.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - // Fake block 1 - _, err = sut.db.Exec(`INSERT INTO block (num) VALUES ($1)`, 1) - require.NoError(t, err) - - _, err = sut.GetLatestInfoUntilBlock(ctx, 1) - require.Equal(t, db.ErrNotFound, err) -} - -func Test_processor_GetL1InfoTreeMerkleProof(t *testing.T) { - testTable := []struct { - name string - getProcessor func(t *testing.T) *processor - idx uint32 - expectedRoot types.Root - expectedErr error - }{ - { - name: "empty tree", - getProcessor: func(t *testing.T) *processor { - t.Helper() - - p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_GetL1InfoTreeMerkleProof_1.sqlite")) - require.NoError(t, err) - - return p - }, - idx: 0, - expectedErr: db.ErrNotFound, - }, - { - name: "single leaf tree", - getProcessor: func(t *testing.T) *processor { - t.Helper() - - p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_GetL1InfoTreeMerkleProof_2.sqlite")) - require.NoError(t, err) - - info := &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("beef"), - RollupExitRoot: common.HexToHash("5ca1e"), - ParentHash: common.HexToHash("1010101"), - Timestamp: 420, - } - err = p.ProcessBlock(context.Background(), sync.Block{ - Num: 1, - Events: []interface{}{ - Event{UpdateL1InfoTree: info}, - }, - }) - require.NoError(t, err) - - return p - }, - idx: 0, - expectedRoot: types.Root{ - Hash: common.HexToHash("beef"), - Index: 0, - BlockNum: 1, - BlockPosition: 0, - }, - }, - } - - for _, tt := range testTable { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - p := tt.getProcessor(t) - proof, root, 
err := p.GetL1InfoTreeMerkleProof(context.Background(), tt.idx) - if tt.expectedErr != nil { - require.Equal(t, tt.expectedErr, err) - } else { - require.NoError(t, err) - require.NotEmpty(t, proof) - require.NotEmpty(t, root.Hash) - require.Equal(t, tt.expectedRoot.Index, root.Index) - require.Equal(t, tt.expectedRoot.BlockNum, root.BlockNum) - require.Equal(t, tt.expectedRoot.BlockPosition, root.BlockPosition) - } - }) - } -} - -func Test_processor_Reorg(t *testing.T) { - t.Parallel() - - testTable := []struct { - name string - getProcessor func(t *testing.T) *processor - reorgBlock uint64 - expectedErr error - }{ - { - name: "empty tree", - getProcessor: func(t *testing.T) *processor { - t.Helper() - - p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_Reorg_1.sqlite")) - require.NoError(t, err) - return p - }, - reorgBlock: 0, - expectedErr: nil, - }, - { - name: "single leaf tree", - getProcessor: func(t *testing.T) *processor { - t.Helper() - - p, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTest_processor_Reorg_2.sqlite")) - require.NoError(t, err) - - info := &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("beef"), - RollupExitRoot: common.HexToHash("5ca1e"), - ParentHash: common.HexToHash("1010101"), - Timestamp: 420, - } - err = p.ProcessBlock(context.Background(), sync.Block{ - Num: 1, - Events: []interface{}{ - Event{UpdateL1InfoTree: info}, - }, - }) - require.NoError(t, err) - - return p - }, - reorgBlock: 1, - }, - } - - for _, tt := range testTable { - tt := tt - - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - - p := tt.getProcessor(t) - err := p.Reorg(context.Background(), tt.reorgBlock) - if tt.expectedErr != nil { - require.Equal(t, tt.expectedErr, err) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestProofsFromDifferentTrees(t *testing.T) { - fmt.Println("aggregator L1InfoTree ===============================================") - - l1Tree, err := 
l1infotree.NewL1InfoTree(log.WithFields("test"), types.DefaultHeight, [][32]byte{}) - require.NoError(t, err) - - leaves := createTestLeaves(t, 2) - - aLeaves := make([][32]byte, len(leaves)) - for i, leaf := range leaves { - aLeaves[i] = l1infotree.HashLeafData( - leaf.GlobalExitRoot, - leaf.PreviousBlockHash, - leaf.Timestamp) - } - - aggregatorL1InfoTree, aggregatorRoot, err := l1Tree.ComputeMerkleProof(leaves[0].L1InfoTreeIndex, aLeaves) - require.NoError(t, err) - - aggregatorProof := types.Proof{} - for i, p := range aggregatorL1InfoTree { - aggregatorProof[i] = common.BytesToHash(p[:]) - } - - fmt.Println(aggregatorRoot) - fmt.Println(aggregatorProof) - fmt.Println("l1 info tree syncer L1InfoTree ===============================================") - - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProofsFromDifferentTrees.sqlite") - require.NoError(t, migrations.RunMigrations(dbPath)) - - dbe, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - - l1InfoTree := tree.NewAppendOnlyTree(dbe, migrations.L1InfoTreePrefix) - - tx, err := db.NewTx(context.Background(), dbe) - require.NoError(t, err) - - for _, leaf := range leaves { - err = l1InfoTree.AddLeaf(tx, leaf.BlockNumber, leaf.BlockPosition, types.Leaf{ - Index: leaf.L1InfoTreeIndex, - Hash: leaf.Hash, - }) - - require.NoError(t, err) - } - - require.NoError(t, tx.Commit()) - - l1InfoTreeSyncerRoot, err := l1InfoTree.GetRootByIndex(context.Background(), leaves[1].L1InfoTreeIndex) - require.NoError(t, err) - l1InfoTreeSyncerProof, err := l1InfoTree.GetProof(context.Background(), leaves[0].L1InfoTreeIndex, l1InfoTreeSyncerRoot.Hash) - require.NoError(t, err) - for i, l := range aggregatorL1InfoTree { - require.Equal(t, common.Hash(l), l1InfoTreeSyncerProof[i]) - } - - fmt.Println(leaves[0].GlobalExitRoot) - fmt.Println(l1InfoTreeSyncerProof) - - require.Equal(t, aggregatorRoot, l1InfoTreeSyncerRoot.Hash) - require.Equal(t, aggregatorProof, l1InfoTreeSyncerProof) -} - -func createTestLeaves(t 
*testing.T, numOfLeaves int) []*L1InfoTreeLeaf { - t.Helper() - - leaves := make([]*L1InfoTreeLeaf, 0, numOfLeaves) - - for i := 0; i < numOfLeaves; i++ { - leaf := &L1InfoTreeLeaf{ - L1InfoTreeIndex: uint32(i), - Timestamp: uint64(i), - BlockNumber: uint64(i), - BlockPosition: uint64(i), - PreviousBlockHash: common.HexToHash(fmt.Sprintf("0x%x", i)), - MainnetExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), - RollupExitRoot: common.HexToHash(fmt.Sprintf("0x%x", i)), - } - - leaf.GlobalExitRoot = leaf.globalExitRoot() - leaf.Hash = leaf.hash() - - leaves = append(leaves, leaf) - } - - return leaves -} - -func TestProcessBlockUpdateL1InfoTreeV2DontMatchTree(t *testing.T) { - sut, err := newProcessor(path.Join(t.TempDir(), "l1infotreesyncTestProcessBlockUpdateL1InfoTreeV2DontMatchTree.sqlite")) - require.NoError(t, err) - block := sync.Block{ - Num: 10, - Events: []interface{}{ - Event{UpdateL1InfoTree: &UpdateL1InfoTree{ - MainnetExitRoot: common.HexToHash("beef"), - RollupExitRoot: common.HexToHash("5ca1e"), - ParentHash: common.HexToHash("1010101"), - Timestamp: 420, - }}, - Event{UpdateL1InfoTreeV2: &UpdateL1InfoTreeV2{ - CurrentL1InfoRoot: common.HexToHash("beef"), - LeafCount: 1, - }}, - }, - } - err = sut.ProcessBlock(context.Background(), block) - require.ErrorIs(t, err, sync.ErrInconsistentState) - require.True(t, sut.halted) -} diff --git a/l1infotreesync/processor_verifybatches.go b/l1infotreesync/processor_verifybatches.go deleted file mode 100644 index 9d1d0efba..000000000 --- a/l1infotreesync/processor_verifybatches.go +++ /dev/null @@ -1,104 +0,0 @@ -package l1infotreesync - -import ( - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - treeTypes "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" -) - -func (p *processor) processVerifyBatches(tx db.Txer, blockNumber uint64, event *VerifyBatches) error { - if event == nil { - return 
fmt.Errorf("processVerifyBatches: event is nil") - } - if tx == nil { - return fmt.Errorf("processVerifyBatches: tx is nil, is mandatory to pass a tx") - } - log.Debugf("VerifyBatches: rollupExitTree.UpsertLeaf (blockNumber=%d, event=%s)", blockNumber, event.String()) - // If ExitRoot is zero if the leaf doesnt exists doesnt change the root of tree. - // if leaf already exists doesn't make sense to 'empty' the leaf, so we keep previous value - if event.ExitRoot == (common.Hash{}) { - log.Infof("skipping VerifyBatches event with empty ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String()) - return nil - } - isNewLeaf, err := p.isNewValueForRollupExitTree(tx, event) - if err != nil { - return fmt.Errorf("isNewValueForrollupExitTree. err: %w", err) - } - if !isNewLeaf { - log.Infof("skipping VerifyBatches event with same ExitRoot (blockNumber=%d, event=%s)", blockNumber, event.String()) - return nil - } - log.Infof("UpsertLeaf VerifyBatches event (blockNumber=%d, event=%s)", blockNumber, event.String()) - newRoot, err := p.rollupExitTree.UpsertLeaf(tx, blockNumber, event.BlockPosition, treeTypes.Leaf{ - Index: event.RollupID - 1, - Hash: event.ExitRoot, - }) - if err != nil { - return fmt.Errorf("error rollupExitTree.UpsertLeaf. err: %w", err) - } - verifyBatches := event - verifyBatches.BlockNumber = blockNumber - verifyBatches.RollupExitRoot = newRoot - if err = meddler.Insert(tx, "verify_batches", verifyBatches); err != nil { - return fmt.Errorf("error inserting verify_batches. err: %w", err) - } - return nil -} - -func (p *processor) isNewValueForRollupExitTree(tx db.Querier, event *VerifyBatches) (bool, error) { - currentRoot, err := p.rollupExitTree.GetLastRoot(tx) - if err != nil && errors.Is(err, db.ErrNotFound) { - // The tree is empty, so is a new value for sure - return true, nil - } - if err != nil { - return false, fmt.Errorf("error rollupExitTree.GetLastRoot. 
err: %w", err) - } - leaf, err := p.rollupExitTree.GetLeaf(tx, event.RollupID-1, currentRoot.Hash) - if err != nil && errors.Is(err, db.ErrNotFound) { - // The leaf doesn't exist, so is a new value - return true, nil - } - if err != nil { - return false, fmt.Errorf("error rollupExitTree.GetLeaf. err: %w", err) - } - return leaf != event.ExitRoot, nil -} - -func (p *processor) GetLastVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 - ORDER BY block_num DESC, block_pos DESC - LIMIT 1; - `, rollupID) - return verified, db.ReturnErrNotFound(err) -} - -func (p *processor) GetFirstVerifiedBatches(rollupID uint32) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, rollupID) - return verified, db.ReturnErrNotFound(err) -} - -func (p *processor) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*VerifyBatches, error) { - verified := &VerifyBatches{} - err := meddler.QueryRow(p.db, verified, ` - SELECT * FROM verify_batches - WHERE rollup_id = $1 AND block_num >= $2 - ORDER BY block_num ASC, block_pos ASC - LIMIT 1; - `, rollupID, blockNum) - return verified, db.ReturnErrNotFound(err) -} diff --git a/l1infotreesync/processor_verifybatches_test.go b/l1infotreesync/processor_verifybatches_test.go deleted file mode 100644 index f8150970d..000000000 --- a/l1infotreesync/processor_verifybatches_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package l1infotreesync - -import ( - "context" - "path" - "testing" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestProcessVerifyBatchesNil(t *testing.T) { - dbPath := path.Join(t.TempDir(), 
"l1infotreesyncTestProcessVerifyBatchesNil.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - err = sut.processVerifyBatches(nil, 1, nil) - require.Error(t, err) -} - -func TestProcessVerifyBatchesOK(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProcessVerifyBatchesOK.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - event := VerifyBatches{ - BlockPosition: 1, - RollupID: 1, - NumBatch: 1, - StateRoot: common.HexToHash("5ca1e"), - ExitRoot: common.HexToHash("b455"), - Aggregator: common.HexToAddress("beef"), - RollupExitRoot: common.HexToHash("b455"), - } - ctx := context.TODO() - tx, err := db.NewTx(ctx, sut.db) - require.NoError(t, err) - _, err = tx.Exec(`INSERT INTO block (num) VALUES ($1)`, 1) - require.NoError(t, err) - err = sut.processVerifyBatches(tx, 1, &event) - require.NoError(t, err) -} - -func TestProcessVerifyBatchesSkip0000(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestProcessVerifyBatchesSkip0000.sqlite") - sut, err := newProcessor(dbPath) - require.NoError(t, err) - event := VerifyBatches{ - BlockPosition: 1, - RollupID: 1, - NumBatch: 1, - StateRoot: common.HexToHash("5ca1e"), - ExitRoot: common.Hash{}, - Aggregator: common.HexToAddress("beef"), - RollupExitRoot: common.HexToHash("b455"), - } - ctx := context.TODO() - tx, err := db.NewTx(ctx, sut.db) - require.NoError(t, err) - err = sut.processVerifyBatches(tx, 1, &event) - require.NoError(t, err) -} - -func TestGetVerifiedBatches(t *testing.T) { - dbPath := path.Join(t.TempDir(), "l1infotreesyncTestGetVerifiedBatches.sqlite") - p, err := newProcessor(dbPath) - require.NoError(t, err) - ctx := context.Background() - - // Test ErrNotFound returned correctly on all methods - _, err = p.GetLastVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - _, err = p.GetFirstVerifiedBatchesAfterBlock(0, 0) - require.Equal(t, 
db.ErrNotFound, err) - - // First insert - expected1 := &VerifyBatches{ - RollupID: 420, - NumBatch: 69, - StateRoot: common.HexToHash("5ca1e"), - ExitRoot: common.HexToHash("b455"), - Aggregator: common.HexToAddress("beef"), - } - err = p.ProcessBlock(ctx, sync.Block{ - Num: 1, - Events: []interface{}{ - Event{VerifyBatches: expected1}, - }, - }) - require.NoError(t, err) - _, err = p.GetLastVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - actual, err := p.GetLastVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected1, actual) - actual, err = p.GetFirstVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected1, actual) - - // Second insert - expected2 := &VerifyBatches{ - RollupID: 420, - NumBatch: 690, - StateRoot: common.HexToHash("5ca1e3"), - ExitRoot: common.HexToHash("ba55"), - Aggregator: common.HexToAddress("beef3"), - } - err = p.ProcessBlock(ctx, sync.Block{ - Num: 2, - Events: []interface{}{ - Event{VerifyBatches: expected2}, - }, - }) - require.NoError(t, err) - _, err = p.GetLastVerifiedBatches(0) - require.Equal(t, db.ErrNotFound, err) - actual, err = p.GetLastVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected2, actual) - actual, err = p.GetFirstVerifiedBatches(420) - require.NoError(t, err) - require.Equal(t, expected1, actual) - actual, err = p.GetFirstVerifiedBatchesAfterBlock(420, 2) - require.NoError(t, err) - require.Equal(t, expected2, actual) -} diff --git a/lastgersync/config.go b/lastgersync/config.go deleted file mode 100644 index 36b12ab60..000000000 --- a/lastgersync/config.go +++ /dev/null @@ -1,28 +0,0 @@ -package lastgersync - -import ( - "github.com/0xPolygon/cdk/config/types" - "github.com/ethereum/go-ethereum/common" -) - -type Config struct { - // DBPath path of the DB - DBPath string `mapstructure:"DBPath"` - // BlockFinality indicates the status of the blocks that will be queried in order to sync - BlockFinality string `jsonschema:"enum=LatestBlock, 
enum=SafeBlock, enum=PendingBlock, enum=FinalizedBlock, enum=EarliestBlock" mapstructure:"BlockFinality"` //nolint:lll - // InitialBlockNum is the first block that will be queried when starting the synchronization from scratch. - // It should be a number equal or bellow the creation of the bridge contract - InitialBlockNum uint64 `mapstructure:"InitialBlockNum"` - // GlobalExitRootL2Addr is the address of the GER smart contract on L2 - GlobalExitRootL2Addr common.Address `mapstructure:"GlobalExitRootL2Addr"` - // RetryAfterErrorPeriod is the time that will be waited when an unexpected error happens before retry - RetryAfterErrorPeriod types.Duration `mapstructure:"RetryAfterErrorPeriod"` - // MaxRetryAttemptsAfterError is the maximum number of consecutive attempts that will happen before panicing. - // Any number smaller than zero will be considered as unlimited retries - MaxRetryAttemptsAfterError int `mapstructure:"MaxRetryAttemptsAfterError"` - // WaitForNewBlocksPeriod time that will be waited when the synchronizer has reached the latest block - WaitForNewBlocksPeriod types.Duration `mapstructure:"WaitForNewBlocksPeriod"` - // DownloadBufferSize buffer of events to be porcessed. When the buffer limit is reached, - // downloading will stop until the processing catches up. 
- DownloadBufferSize int `mapstructure:"DownloadBufferSize"` -} diff --git a/lastgersync/e2e_test.go b/lastgersync/e2e_test.go deleted file mode 100644 index b0782538a..000000000 --- a/lastgersync/e2e_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package lastgersync_test - -import ( - "context" - "fmt" - "path" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/lastgersync" - "github.com/0xPolygon/cdk/test/helpers" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestE2E(t *testing.T) { - ctx := context.Background() - setup := helpers.NewE2EEnvWithEVML2(t) - dbPathSyncer := path.Join(t.TempDir(), "lastgersyncTestE2E.sqlite") - syncer, err := lastgersync.New( - ctx, - dbPathSyncer, - setup.L2Environment.ReorgDetector, - setup.L2Environment.SimBackend.Client(), - setup.L2Environment.GERAddr, - setup.InfoTreeSync, - 0, - 0, - etherman.LatestBlock, - time.Millisecond*30, - 10, - ) - require.NoError(t, err) - go syncer.Start(ctx) - - for i := 0; i < 10; i++ { - // Update GER on L1 - _, err := setup.L1Environment.GERContract.UpdateExitRoot(setup.L1Environment.Auth, common.HexToHash(strconv.Itoa(i))) - require.NoError(t, err) - setup.L1Environment.SimBackend.Commit() - time.Sleep(time.Millisecond * 150) - expectedGER, err := setup.L1Environment.GERContract.GetLastGlobalExitRoot(&bind.CallOpts{Pending: false}) - require.NoError(t, err) - isInjected, err := setup.AggoracleSender.IsGERInjected(expectedGER) - require.NoError(t, err) - require.True(t, isInjected, fmt.Sprintf("iteration %d, GER: %s", i, common.Bytes2Hex(expectedGER[:]))) - - // Wait for syncer to catch up - lb, err := setup.L2Environment.SimBackend.Client().BlockNumber(ctx) - require.NoError(t, err) - helpers.RequireProcessorUpdated(t, syncer, lb) - - e, err := syncer.GetFirstGERAfterL1InfoTreeIndex(ctx, uint32(i)) - require.NoError(t, err, fmt.Sprint("iteration: ", i)) 
- require.Equal(t, common.Hash(expectedGER), e.GlobalExitRoot, fmt.Sprint("iteration: ", i)) - } -} diff --git a/lastgersync/evmdownloader.go b/lastgersync/evmdownloader.go deleted file mode 100644 index 9a203b596..000000000 --- a/lastgersync/evmdownloader.go +++ /dev/null @@ -1,180 +0,0 @@ -package lastgersync - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/globalexitrootmanagerl2sovereignchain" - cdkcommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" -) - -type EthClienter interface { - ethereum.LogFilterer - ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend -} - -type downloader struct { - *sync.EVMDownloaderImplementation - l2GERManager *globalexitrootmanagerl2sovereignchain.Globalexitrootmanagerl2sovereignchain - l1InfoTreesync *l1infotreesync.L1InfoTreeSync - processor *processor - rh *sync.RetryHandler -} - -func newDownloader( - l2Client EthClienter, - l2GERAddr common.Address, - l1InfoTreeSync *l1infotreesync.L1InfoTreeSync, - processor *processor, - rh *sync.RetryHandler, - blockFinality *big.Int, - waitForNewBlocksPeriod time.Duration, -) (*downloader, error) { - gerContract, err := globalexitrootmanagerl2sovereignchain.NewGlobalexitrootmanagerl2sovereignchain( - l2GERAddr, l2Client) - if err != nil { - return nil, err - } - - return &downloader{ - EVMDownloaderImplementation: sync.NewEVMDownloaderImplementation( - "lastgersync", l2Client, blockFinality, waitForNewBlocksPeriod, nil, nil, nil, rh, - ), - l2GERManager: gerContract, - l1InfoTreesync: l1InfoTreeSync, - processor: processor, - rh: rh, - }, nil -} - -func (d *downloader) Download(ctx context.Context, fromBlock uint64, 
downloadedCh chan sync.EVMBlock) { - var ( - attempts int - nextIndex uint32 - err error - ) - for { - lastIndex, err := d.processor.getLastIndex() - if errors.Is(err, db.ErrNotFound) { - nextIndex = 0 - } else if err != nil { - log.Errorf("error getting last indes: %v", err) - attempts++ - d.rh.Handle("getLastIndex", attempts) - - continue - } - if lastIndex > 0 { - nextIndex = lastIndex + 1 - } - break - } - for { - select { - case <-ctx.Done(): - log.Debug("closing channel") - close(downloadedCh) - - return - default: - } - fromBlock = d.WaitForNewBlocks(ctx, fromBlock) - - attempts = 0 - var gers []Event - for { - gers, err = d.getGERsFromIndex(ctx, nextIndex) - if err != nil { - log.Errorf("error getting GERs: %v", err) - attempts++ - d.rh.Handle("getGERsFromIndex", attempts) - - continue - } - - break - } - - blockHeader, isCanceled := d.GetBlockHeader(ctx, fromBlock) - if isCanceled { - return - } - - block := &sync.EVMBlock{ - EVMBlockHeader: sync.EVMBlockHeader{ - Num: blockHeader.Num, - Hash: blockHeader.Hash, - ParentHash: blockHeader.ParentHash, - Timestamp: blockHeader.Timestamp, - }, - } - d.setGreatestGERInjectedFromList(block, gers) - - downloadedCh <- *block - if len(block.Events) > 0 { - event, ok := block.Events[0].(Event) - if !ok { - log.Errorf("unexpected type %T in events", block.Events[0]) - } - nextIndex = event.L1InfoTreeIndex + 1 - } - } -} - -func (d *downloader) getGERsFromIndex(ctx context.Context, fromL1InfoTreeIndex uint32) ([]Event, error) { - lastRoot, err := d.l1InfoTreesync.GetLastL1InfoTreeRoot(ctx) - if errors.Is(err, db.ErrNotFound) { - return nil, nil - } - if err != nil { - return nil, fmt.Errorf("error calling GetLastL1InfoTreeRoot: %w", err) - } - - gers := []Event{} - for i := fromL1InfoTreeIndex; i <= lastRoot.Index; i++ { - info, err := d.l1InfoTreesync.GetInfoByIndex(ctx, i) - if err != nil { - return nil, fmt.Errorf("error calling GetInfoByIndex: %w", err) - } - gers = append(gers, Event{ - L1InfoTreeIndex: i, - 
GlobalExitRoot: info.GlobalExitRoot, - }) - } - - return gers, nil -} - -func (d *downloader) setGreatestGERInjectedFromList(b *sync.EVMBlock, list []Event) { - for _, event := range list { - var attempts int - for { - blockHashBigInt, err := d.l2GERManager.GlobalExitRootMap(&bind.CallOpts{Pending: false}, event.GlobalExitRoot) - if err != nil { - attempts++ - log.Errorf("failed to check if global exit root %s is injected on L2: %s", event.GlobalExitRoot.Hex(), err) - d.rh.Handle("GlobalExitRootMap", attempts) - - continue - } - - if common.BigToHash(blockHashBigInt) != cdkcommon.ZeroHash { - b.Events = []interface{}{event} - } - - break - } - } -} diff --git a/lastgersync/lastgersync.go b/lastgersync/lastgersync.go deleted file mode 100644 index 6d8f3509f..000000000 --- a/lastgersync/lastgersync.go +++ /dev/null @@ -1,84 +0,0 @@ -package lastgersync - -import ( - "context" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/sync" - "github.com/ethereum/go-ethereum/common" -) - -const ( - reorgDetectorID = "lastGERSync" -) - -type LastGERSync struct { - driver *sync.EVMDriver - processor *processor -} - -func New( - ctx context.Context, - dbPath string, - rdL2 sync.ReorgDetector, - l2Client EthClienter, - globalExitRootL2 common.Address, - l1InfoTreesync *l1infotreesync.L1InfoTreeSync, - retryAfterErrorPeriod time.Duration, - maxRetryAttemptsAfterError int, - blockFinality etherman.BlockNumberFinality, - waitForNewBlocksPeriod time.Duration, - downloadBufferSize int, -) (*LastGERSync, error) { - processor, err := newProcessor(dbPath, "lastGERSync") - if err != nil { - return nil, err - } - - rh := &sync.RetryHandler{ - RetryAfterErrorPeriod: retryAfterErrorPeriod, - MaxRetryAttemptsAfterError: maxRetryAttemptsAfterError, - } - bf, err := blockFinality.ToBlockNum() - if err != nil { - return nil, err - } - downloader, err := newDownloader( - l2Client, - globalExitRootL2, - l1InfoTreesync, - 
processor, - rh, - bf, - waitForNewBlocksPeriod, - ) - if err != nil { - return nil, err - } - - driver, err := sync.NewEVMDriver(rdL2, processor, downloader, reorgDetectorID, downloadBufferSize, rh) - if err != nil { - return nil, err - } - - return &LastGERSync{ - driver: driver, - processor: processor, - }, nil -} - -func (s *LastGERSync) Start(ctx context.Context) { - s.driver.Sync(ctx) -} - -func (s *LastGERSync) GetFirstGERAfterL1InfoTreeIndex( - ctx context.Context, atOrAfterL1InfoTreeIndex uint32, -) (Event, error) { - return s.processor.GetFirstGERAfterL1InfoTreeIndex(ctx, atOrAfterL1InfoTreeIndex) -} - -func (s *LastGERSync) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - return s.processor.GetLastProcessedBlock(ctx) -} diff --git a/lastgersync/migrations/lastgersync0001.sql b/lastgersync/migrations/lastgersync0001.sql deleted file mode 100644 index 88021fa10..000000000 --- a/lastgersync/migrations/lastgersync0001.sql +++ /dev/null @@ -1,14 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS block; -DROP TABLE IF EXISTS global_exit_root; - --- +migrate Up -CREATE TABLE block ( - num BIGINT PRIMARY KEY -); - -CREATE TABLE imported_global_exit_root ( - block_num INTEGER PRIMARY KEY REFERENCES block(num) ON DELETE CASCADE, - global_exit_root VARCHAR NOT NULL, - l1_info_tree_index INTEGER NOT NULL -); \ No newline at end of file diff --git a/lastgersync/migrations/migrations.go b/lastgersync/migrations/migrations.go deleted file mode 100644 index d55dd4498..000000000 --- a/lastgersync/migrations/migrations.go +++ /dev/null @@ -1,21 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" -) - -//go:embed lastgersync0001.sql -var mig001 string - -func RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "lastgersync0001", - SQL: mig001, - }, - } - return db.RunMigrations(dbPath, migrations) -} diff --git a/lastgersync/processor.go 
b/lastgersync/processor.go deleted file mode 100644 index 545c2495b..000000000 --- a/lastgersync/processor.go +++ /dev/null @@ -1,138 +0,0 @@ -package lastgersync - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/lastgersync/migrations" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/sync" - ethCommon "github.com/ethereum/go-ethereum/common" - "github.com/russross/meddler" -) - -type Event struct { - GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` - L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` -} - -type eventWithBlockNum struct { - GlobalExitRoot ethCommon.Hash `meddler:"global_exit_root,hash"` - L1InfoTreeIndex uint32 `meddler:"l1_info_tree_index"` - BlockNum uint64 `meddler:"block_num"` -} - -type processor struct { - db *sql.DB - log *log.Logger -} - -func newProcessor(dbPath string, loggerPrefix string) (*processor, error) { - err := migrations.RunMigrations(dbPath) - if err != nil { - return nil, err - } - db, err := db.NewSQLiteDB(dbPath) - if err != nil { - return nil, err - } - logger := log.WithFields("lastger-syncer", loggerPrefix) - return &processor{ - db: db, - log: logger, - }, nil -} - -// GetLastProcessedBlock returns the last processed block by the processor, including blocks -// that don't have events -func (p *processor) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - var lastProcessedBlock uint64 - row := p.db.QueryRow("SELECT num FROM BLOCK ORDER BY num DESC LIMIT 1;") - err := row.Scan(&lastProcessedBlock) - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - return lastProcessedBlock, err -} - -func (p *processor) getLastIndex() (uint32, error) { - var lastIndex uint32 - row := p.db.QueryRow(` - SELECT l1_info_tree_index - FROM imported_global_exit_root - ORDER BY l1_info_tree_index DESC LIMIT 1; - `) - err := row.Scan(&lastIndex) - if errors.Is(err, sql.ErrNoRows) { - return 0, nil - } - return lastIndex, err -} 
- -func (p *processor) ProcessBlock(ctx context.Context, block sync.Block) error { - tx, err := db.NewTx(ctx, p.db) - if err != nil { - return err - } - shouldRollback := true - defer func() { - if shouldRollback { - if errRollback := tx.Rollback(); errRollback != nil { - log.Errorf("error while rolling back tx %v", errRollback) - } - } - }() - - if _, err := tx.Exec(`INSERT INTO block (num) VALUES ($1)`, block.Num); err != nil { - return err - } - for _, e := range block.Events { - event, ok := e.(Event) - if !ok { - return errors.New("failed to convert sync.Block.Event to Event") - } - if err = meddler.Insert(tx, "imported_global_exit_root", &eventWithBlockNum{ - GlobalExitRoot: event.GlobalExitRoot, - L1InfoTreeIndex: event.L1InfoTreeIndex, - BlockNum: block.Num, - }); err != nil { - return err - } - } - - if err := tx.Commit(); err != nil { - return err - } - shouldRollback = false - p.log.Debugf("processed %d events until block %d", len(block.Events), block.Num) - return nil -} - -func (p *processor) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - _, err := p.db.Exec(`DELETE FROM block WHERE num >= $1;`, firstReorgedBlock) - return fmt.Errorf("error processing reorg: %w", err) -} - -// GetFirstGERAfterL1InfoTreeIndex returns the first GER injected on the chain that is related to l1InfoTreeIndex -// or greater -func (p *processor) GetFirstGERAfterL1InfoTreeIndex( - ctx context.Context, l1InfoTreeIndex uint32, -) (Event, error) { - e := Event{} - err := meddler.QueryRow(p.db, &e, ` - SELECT l1_info_tree_index, global_exit_root - FROM imported_global_exit_root - WHERE l1_info_tree_index >= $1 - ORDER BY l1_info_tree_index ASC LIMIT 1; - `, l1InfoTreeIndex) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return e, db.ErrNotFound - } - return e, err - } - return e, nil -} diff --git a/log/config.go b/log/config.go deleted file mode 100644 index 4ebbf5020..000000000 --- a/log/config.go +++ /dev/null @@ -1,15 +0,0 @@ -package log - -// Config 
for log -type Config struct { - // Environment defining the log format ("production" or "development"). - // In development mode enables development mode (which makes DPanicLevel logs panic), - // uses a console encoder, writes to standard error, and disables sampling. - // Stacktraces are automatically included on logs of WarnLevel and above. - // Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) - Environment LogEnvironment `mapstructure:"Environment" jsonschema:"enum=production,enum=development"` - // Level of log. As lower value more logs are going to be generated - Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"` //nolint:lll - // Outputs - Outputs []string `mapstructure:"Outputs"` -} diff --git a/log/log.go b/log/log.go deleted file mode 100644 index b6d9d1cc4..000000000 --- a/log/log.go +++ /dev/null @@ -1,330 +0,0 @@ -package log - -import ( - "fmt" - "os" - "strings" - "sync/atomic" - - zkevm "github.com/0xPolygon/cdk" - "github.com/hermeznetwork/tracerr" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" -) - -// LogEnvironment represents the possible log environments. -type LogEnvironment string - -const ( - // EnvironmentProduction production log environment. - EnvironmentProduction = LogEnvironment("production") - // EnvironmentDevelopment development log environment. - EnvironmentDevelopment = LogEnvironment("development") -) - -// Logger is a wrapper providing logging facilities. 
-type Logger struct { - x *zap.SugaredLogger -} - -// root logger -var log atomic.Pointer[Logger] - -func GetDefaultLogger() *Logger { - l := log.Load() - if l != nil { - return l - } - // default level: debug - zapLogger, _, err := NewLogger( - Config{ - Environment: EnvironmentDevelopment, - Level: "debug", - Outputs: []string{"stderr"}, - }) - if err != nil { - panic(err) - } - log.Store(&Logger{x: zapLogger}) - return log.Load() -} - -// Init the logger with defined level. outputs defines the outputs where the -// logs will be sent. By default outputs contains "stdout", which prints the -// logs at the output of the process. To add a log file as output, the path -// should be added at the outputs array. To avoid printing the logs but storing -// them on a file, can use []string{"pathtofile.log"} -func Init(cfg Config) { - zapLogger, _, err := NewLogger(cfg) - if err != nil { - panic(err) - } - log.Store(&Logger{x: zapLogger}) -} - -// NewLogger creates the logger with defined level. outputs defines the outputs where the -// logs will be sent. By default, outputs contains "stdout", which prints the -// logs at the output of the process. To add a log file as output, the path -// should be added at the outputs array. 
To avoid printing the logs but storing -// them on a file, can use []string{"pathtofile.log"} -func NewLogger(cfg Config) (*zap.SugaredLogger, *zap.AtomicLevel, error) { - var level zap.AtomicLevel - err := level.UnmarshalText([]byte(cfg.Level)) - if err != nil { - return nil, nil, fmt.Errorf("error on setting log level: %w", err) - } - - var zapCfg zap.Config - - switch cfg.Environment { - case EnvironmentProduction: - zapCfg = zap.NewProductionConfig() - default: - zapCfg = zap.NewDevelopmentConfig() - zapCfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder - } - zapCfg.Level = level - zapCfg.OutputPaths = cfg.Outputs - zapCfg.InitialFields = map[string]interface{}{ - "version": zkevm.Version, - "pid": os.Getpid(), - } - - logger, err := zapCfg.Build() - if err != nil { - return nil, nil, err - } - defer logger.Sync() //nolint:errcheck - - // skip 2 callers: one for our wrapper methods and one for the package functions - withOptions := logger.WithOptions(zap.AddCallerSkip(2)) //nolint:mnd - return withOptions.Sugar(), &level, nil -} - -// WithFields returns a new Logger (derived from the root one) with additional -// fields as per keyValuePairs. The root Logger instance is not affected. -func WithFields(keyValuePairs ...interface{}) *Logger { - l := GetDefaultLogger().WithFields(keyValuePairs...) - - // since we are returning a new instance, remove one caller from the - // stack, because we'll be calling the retruned Logger methods - // directly, not the package functions. - x := l.x.WithOptions(zap.AddCallerSkip(-1)) - l.x = x - return l -} - -// WithFields returns a new Logger with additional fields as per keyValuePairs. -// The original Logger instance is not affected. -func (l *Logger) WithFields(keyValuePairs ...interface{}) *Logger { - return &Logger{ - x: l.x.With(keyValuePairs...), - } -} - -// GetSugaredLogger is a getter function that returns instance of already built zap.SugaredLogger. 
-func (l *Logger) GetSugaredLogger() *zap.SugaredLogger { - return l.x -} - -func sprintStackTrace(st []tracerr.Frame) string { - builder := strings.Builder{} - // Skip deepest frame because it belongs to the go runtime and we don't - // care about it. - if len(st) > 0 { - st = st[:len(st)-1] - } - for _, f := range st { - builder.WriteString(fmt.Sprintf("\n%s:%d %s()", f.Path, f.Line, f.Func)) - } - builder.WriteString("\n") - return builder.String() -} - -// appendStackTraceMaybeArgs will append the stacktrace to the args -func appendStackTraceMaybeArgs(args []interface{}) []interface{} { - for i := range args { - if err, ok := args[i].(error); ok { - err = tracerr.Wrap(err) - st := tracerr.StackTrace(err) - return append(args, sprintStackTrace(st)) - } - } - return args -} - -// Debug calls log.Debug -func (l *Logger) Debug(args ...interface{}) { - l.x.Debug(args...) -} - -// Info calls log.Info -func (l *Logger) Info(args ...interface{}) { - l.x.Info(args...) -} - -// Warn calls log.Warn -func (l *Logger) Warn(args ...interface{}) { - l.x.Warn(args...) -} - -// Error calls log.Error -func (l *Logger) Error(args ...interface{}) { - l.x.Error(args...) -} - -// Fatal calls log.Fatal -func (l *Logger) Fatal(args ...interface{}) { - l.x.Fatal(args...) -} - -// Debugf calls log.Debugf -func (l *Logger) Debugf(template string, args ...interface{}) { - l.x.Debugf(template, args...) -} - -// Infof calls log.Infof -func (l *Logger) Infof(template string, args ...interface{}) { - l.x.Infof(template, args...) -} - -// Warnf calls log.Warnf -func (l *Logger) Warnf(template string, args ...interface{}) { - l.x.Warnf(template, args...) -} - -// Fatalf calls log.Fatalf -func (l *Logger) Fatalf(template string, args ...interface{}) { - l.x.Fatalf(template, args...) -} - -// Errorf calls log.Errorf and stores the error message into the ErrorFile -func (l *Logger) Errorf(template string, args ...interface{}) { - l.x.Errorf(template, args...) 
-} - -// Debug calls log.Debug on the root Logger. -func Debug(args ...interface{}) { - GetDefaultLogger().Debug(args...) -} - -// Info calls log.Info on the root Logger. -func Info(args ...interface{}) { - GetDefaultLogger().Info(args...) -} - -// Warn calls log.Warn on the root Logger. -func Warn(args ...interface{}) { - GetDefaultLogger().Warn(args...) -} - -// Error calls log.Error on the root Logger. -func Error(args ...interface{}) { - args = appendStackTraceMaybeArgs(args) - GetDefaultLogger().Error(args...) -} - -// Fatal calls log.Fatal on the root Logger. -func Fatal(args ...interface{}) { - args = appendStackTraceMaybeArgs(args) - GetDefaultLogger().Fatal(args...) -} - -// Debugf calls log.Debugf on the root Logger. -func Debugf(template string, args ...interface{}) { - GetDefaultLogger().Debugf(template, args...) -} - -// Infof calls log.Infof on the root Logger. -func Infof(template string, args ...interface{}) { - GetDefaultLogger().Infof(template, args...) -} - -// Warnf calls log.Warnf on the root Logger. -func Warnf(template string, args ...interface{}) { - GetDefaultLogger().Warnf(template, args...) -} - -// Fatalf calls log.Fatalf on the root Logger. -func Fatalf(template string, args ...interface{}) { - args = appendStackTraceMaybeArgs(args) - GetDefaultLogger().Fatalf(template, args...) -} - -// Errorf calls log.Errorf on the root logger and stores the error message into -// the ErrorFile. -func Errorf(template string, args ...interface{}) { - args = appendStackTraceMaybeArgs(args) - GetDefaultLogger().Errorf(template, args...) -} - -// Debugw calls log.Debugw -func (l *Logger) Debugw(msg string, kv ...interface{}) { - l.x.Debugw(msg, kv...) -} - -// Infow calls log.Infow -func (l *Logger) Infow(msg string, kv ...interface{}) { - l.x.Infow(msg, kv...) -} - -// Warnw calls log.Warnw -func (l *Logger) Warnw(msg string, kv ...interface{}) { - l.x.Warnw(msg, kv...) 
-} - -// Errorw calls log.Errorw -func (l *Logger) Errorw(msg string, kv ...interface{}) { - l.x.Errorw(msg, kv...) -} - -// Fatalw calls log.Fatalw -func (l *Logger) Fatalw(msg string, kv ...interface{}) { - l.x.Fatalw(msg, kv...) -} - -// Debugw calls log.Debugw on the root Logger. -func Debugw(msg string, kv ...interface{}) { - GetDefaultLogger().Debugw(msg, kv...) -} - -// Infow calls log.Infow on the root Logger. -func Infow(msg string, kv ...interface{}) { - GetDefaultLogger().Infow(msg, kv...) -} - -// Warnw calls log.Warnw on the root Logger. -func Warnw(msg string, kv ...interface{}) { - GetDefaultLogger().Warnw(msg, kv...) -} - -// Errorw calls log.Errorw on the root Logger. -func Errorw(msg string, kv ...interface{}) { - msg = appendStackTraceMaybeKV(msg, kv) - GetDefaultLogger().Errorw(msg, kv...) -} - -// Fatalw calls log.Fatalw on the root Logger. -func Fatalw(msg string, kv ...interface{}) { - msg = appendStackTraceMaybeKV(msg, kv) - GetDefaultLogger().Fatalw(msg, kv...) -} - -// appendStackTraceMaybeKV will append the stacktrace to the KV -func appendStackTraceMaybeKV(msg string, kv []interface{}) string { - for i := range kv { - if i%2 == 0 { - continue - } - if err, ok := kv[i].(error); ok { - err = tracerr.Wrap(err) - st := tracerr.StackTrace(err) - return fmt.Sprintf("%v: %v%v\n", msg, err, sprintStackTrace(st)) - } - } - return msg -} - -func (l *Logger) IsEnabledLogLevel(lvl zapcore.Level) bool { - return l.x.Level().Enabled(lvl) -} diff --git a/log/log_test.go b/log/log_test.go deleted file mode 100644 index 9a596608d..000000000 --- a/log/log_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package log - -import ( - "testing" -) - -func TestLogNotInitialized(t *testing.T) { - Info("Test log.Info", " value is ", 10) - Infof("Test log.Infof %d", 10) - Infow("Test log.Infow", "value", 10) - Debugf("Test log.Debugf %d", 10) - Error("Test log.Error", " value is ", 10) - Errorf("Test log.Errorf %d", 10) - Errorw("Test log.Errorw", "value", 10) - Warnf("Test 
log.Warnf %d", 10) - Warnw("Test log.Warnw", "value", 10) -} - -func TestLog(t *testing.T) { - cfg := Config{ - Environment: EnvironmentDevelopment, - Level: "debug", - Outputs: []string{"stderr"}, // []string{"stdout", "test.log"} - } - - Init(cfg) - - Info("Test log.Info", " value is ", 10) - Infof("Test log.Infof %d", 10) - Infow("Test log.Infow", "value", 10) - Debugf("Test log.Debugf %d", 10) - Error("Test log.Error", " value is ", 10) - Errorf("Test log.Errorf %d", 10) - Errorw("Test log.Errorw", "value", 10) - Warnf("Test log.Warnf %d", 10) - Warnw("Test log.Warnw", "value", 10) -} diff --git a/logerror b/logerror deleted file mode 100644 index cf3e44c15..000000000 --- a/logerror +++ /dev/null @@ -1 +0,0 @@ -ok github.com/0xPolygon/cdk/l1infotreesync 2.438s diff --git a/merkletree/key.go b/merkletree/key.go deleted file mode 100644 index 7926df60a..000000000 --- a/merkletree/key.go +++ /dev/null @@ -1,207 +0,0 @@ -package merkletree - -import ( - "math" - "math/big" - - "github.com/ethereum/go-ethereum/common" - poseidon "github.com/iden3/go-iden3-crypto/goldenposeidon" -) - -// Key stores key of the leaf -type Key [32]byte - -const ( - // HashPoseidonAllZeroes represents the poseidon hash for an input with all - // bits set to zero. - HashPoseidonAllZeroes = "0xc71603f33a1144ca7953db0ab48808f4c4055e3364a246c33c18a9786cb0b359" -) - -// keyEthAddr is the common code for all the keys related to ethereum addresses. 
-func keyEthAddr(ethAddr common.Address, leafType leafType, key1Capacity [4]uint64) ([]byte, error) { - ethAddrBI := new(big.Int).SetBytes(ethAddr.Bytes()) - ethAddrArr := scalar2fea(ethAddrBI) - - key1 := [8]uint64{ - ethAddrArr[0], - ethAddrArr[1], - ethAddrArr[2], - ethAddrArr[3], - ethAddrArr[4], - 0, - uint64(leafType), - 0, - } - - result, err := poseidon.Hash(key1, key1Capacity) - if err != nil { - return nil, err - } - - return h4ToFilledByteSlice(result[:]), nil -} - -func defaultCapIn() ([4]uint64, error) { - capIn, err := StringToh4(HashPoseidonAllZeroes) - if err != nil { - return [4]uint64{}, err - } - - return [4]uint64{capIn[0], capIn[1], capIn[2], capIn[3]}, nil -} - -// KeyEthAddrBalance returns the key of balance leaf: -// hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 0, 0], -// [hk0[0], hk0[1], hk0[2], hk0[3]]) -func KeyEthAddrBalance(ethAddr common.Address) ([]byte, error) { - capIn, err := defaultCapIn() - if err != nil { - return nil, err - } - - return keyEthAddr(ethAddr, LeafTypeBalance, capIn) -} - -// KeyEthAddrNonce returns the key of nonce leaf: -// hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 1, 0], -// [hk0[0], hk0[1], hk0[2], hk0[3]] -func KeyEthAddrNonce(ethAddr common.Address) ([]byte, error) { - capIn, err := defaultCapIn() - if err != nil { - return nil, err - } - - return keyEthAddr(ethAddr, LeafTypeNonce, capIn) -} - -// KeyContractCode returns the key of contract code leaf: -// hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 2, 0], -// [hk0[0], hk0[1], hk0[2], hk0[3]] -func KeyContractCode(ethAddr common.Address) ([]byte, error) { - capIn, err := defaultCapIn() - if err != nil { - return nil, err - } - - return keyEthAddr(ethAddr, LeafTypeCode, capIn) -} - -// 
KeyContractStorage returns the key of contract storage position leaf: -// hk0: H([stoPos[0:4], stoPos[4:8], stoPos[8:12], stoPos[12:16], stoPos[16:20], stoPos[20:24], -// stoPos[24:28], stoPos[28:32], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 3, 0], -// [hk0[0], hk0[1], hk0[2], hk0[3]) -func KeyContractStorage(ethAddr common.Address, storagePos []byte) ([]byte, error) { - storageBI := new(big.Int).SetBytes(storagePos) - - storageArr := scalar2fea(storageBI) - - hk0, err := poseidon.Hash([8]uint64{ - storageArr[0], - storageArr[1], - storageArr[2], - storageArr[3], - storageArr[4], - storageArr[5], - storageArr[6], - storageArr[7], - }, [4]uint64{}) - if err != nil { - return nil, err - } - - return keyEthAddr(ethAddr, LeafTypeStorage, hk0) -} - -// HashContractBytecode computes the bytecode hash in order to add it to the -// state-tree. -func HashContractBytecode(code []byte) ([]uint64, error) { - const ( - bytecodeElementsHash = 8 - bytecodeBytesElement = 7 - - maxBytesToAdd = bytecodeElementsHash * bytecodeBytesElement - ) - - // add 0x01 - code = append(code, 0x01) //nolint:mnd - - // add padding - for len(code)%(56) != 0 { - code = append(code, 0x00) //nolint:mnd - } - - code[len(code)-1] = code[len(code)-1] | 0x80 //nolint:mnd - - numHashes := int(math.Ceil(float64(len(code)) / float64(maxBytesToAdd))) - - tmpHash := [4]uint64{} - var err error - - bytesPointer := 0 - for i := 0; i < numHashes; i++ { - elementsToHash := [12]uint64{} - - for j := 0; j < 4; j++ { - elementsToHash[j] = tmpHash[j] - } - - subsetBytecode := code[bytesPointer : int(math.Min(float64(len(code)-1), float64(bytesPointer+maxBytesToAdd)))+1] - bytesPointer += maxBytesToAdd - tmpElem := [7]byte{} - counter := 0 - index := 4 - for j := 0; j < maxBytesToAdd; j++ { - byteToAdd := []byte{0} - - if j < len(subsetBytecode) { - byteToAdd = subsetBytecode[j : j+1] - } - - tmpElem[bytecodeBytesElement-1-counter] = byteToAdd[0] - 
counter++ - - if counter == bytecodeBytesElement { - elementsToHash[index] = new(big.Int).SetBytes(tmpElem[:]).Uint64() - index++ - tmpElem = [7]byte{} - counter = 0 - } - } - tmpHash, err = poseidon.Hash([8]uint64{ - elementsToHash[4], - elementsToHash[5], - elementsToHash[6], - elementsToHash[7], - elementsToHash[8], - elementsToHash[9], - elementsToHash[10], - elementsToHash[11], - }, [4]uint64{ - elementsToHash[0], - elementsToHash[1], - elementsToHash[2], - elementsToHash[3], - }) - if err != nil { - return nil, err - } - } - return tmpHash[:], nil -} - -// KeyCodeLength returns the key of code length leaf: -// hk0: H([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0]) -// key: H([ethAddr[0:4], ethAddr[4:8], ethAddr[8:12], ethAddr[12:16], ethAddr[16:20], 0, 4, 0], -// [hk0[0], hk0[1], hk0[2], hk0[3]] -func KeyCodeLength(ethAddr common.Address) ([]byte, error) { - capIn, err := defaultCapIn() - if err != nil { - return nil, err - } - - return keyEthAddr(ethAddr, LeafTypeSCLength, capIn) -} diff --git a/merkletree/key_test.go b/merkletree/key_test.go deleted file mode 100644 index fab056f95..000000000 --- a/merkletree/key_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package merkletree - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "math/big" - "os" - "path" - "runtime" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type testVectorKey struct { - EthAddr string `json:"ethAddr"` - StoragePosition string `json:"storagePosition"` - ExpectedKey string `json:"expectedKey"` -} - -type bytecodeTest struct { - Bytecode string `json:"bytecode"` - ExpectedHash string `json:"expectedHash"` -} - -func init() { - // Change dir to project root - // This is important because we have relative paths to files containing test vectors - _, filename, _, _ := runtime.Caller(0) //nolint:dogsled - dir := path.Join(path.Dir(filename), "../") - - if err := os.Chdir(dir); err != nil { - panic(err) - } 
-} - -func Test_CommonKeys(t *testing.T) { - tcs := []struct { - description string - testVectorFile string - keyFunc func(common.Address) ([]byte, error) - }{ - { - description: "keyEthAddressBalance", - testVectorFile: "test/vectors/src/merkle-tree/smt-key-eth-balance.json", - keyFunc: KeyEthAddrBalance, - }, - { - description: "keyEthAddressNonce", - testVectorFile: "test/vectors/src/merkle-tree/smt-key-eth-nonce.json", - keyFunc: KeyEthAddrNonce, - }, - { - description: "keyContractCode", - testVectorFile: "test/vectors/src/merkle-tree/smt-key-contract-code.json", - keyFunc: KeyContractCode, - }, - { - description: "keyCodeLength", - testVectorFile: "test/vectors/src/merkle-tree/smt-key-contract-length.json", - keyFunc: KeyCodeLength, - }, - } - for _, tc := range tcs { - tc := tc - - data, err := os.ReadFile(tc.testVectorFile) - require.NoError(t, err) - - var testVectors []testVectorKey - err = json.Unmarshal(data, &testVectors) - require.NoError(t, err) - - for ti, testVector := range testVectors { - t.Run(fmt.Sprintf("%s, test vector %d", tc.description, ti), func(t *testing.T) { - key, err := tc.keyFunc(common.HexToAddress(testVector.EthAddr)) - require.NoError(t, err) - require.Equal(t, len(key), maxBigIntLen) - - expected, _ := new(big.Int).SetString(testVector.ExpectedKey, 10) - assert.Equal(t, hex.EncodeToString(expected.Bytes()), hex.EncodeToString(key)) - }) - } - } -} - -func Test_KeyContractStorage(t *testing.T) { - data, err := os.ReadFile("test/vectors/src/merkle-tree/smt-key-contract-storage.json") - require.NoError(t, err) - - var testVectors []testVectorKey - err = json.Unmarshal(data, &testVectors) - require.NoError(t, err) - - for ti, testVector := range testVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - storagePosition, ok := new(big.Int).SetString(testVector.StoragePosition, 10) - require.True(t, ok) - key, err := KeyContractStorage(common.HexToAddress(testVector.EthAddr), storagePosition.Bytes()) - 
require.NoError(t, err) - require.Equal(t, len(key), maxBigIntLen) - - expected, _ := new(big.Int).SetString(testVector.ExpectedKey, 10) - assert.Equal(t, hex.EncodeToString(expected.Bytes()), hex.EncodeToString(key)) - }) - } -} - -func Test_byteCodeHash(t *testing.T) { - data, err := os.ReadFile("test/vectors/src/merkle-tree/smt-hash-bytecode.json") - require.NoError(t, err) - - var testVectors []bytecodeTest - err = json.Unmarshal(data, &testVectors) - require.NoError(t, err) - - for ti, testVector := range testVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - hash, err := HashContractBytecode(common.Hex2Bytes(testVector.Bytecode)) - require.NoError(t, err) - assert.Equal(t, common.HexToHash(testVector.ExpectedHash), common.HexToHash(H4ToString(hash))) - }) - } -} diff --git a/merkletree/leaf.go b/merkletree/leaf.go deleted file mode 100644 index 5321f69ac..000000000 --- a/merkletree/leaf.go +++ /dev/null @@ -1,17 +0,0 @@ -package merkletree - -// leafType specifies type of the leaf -type leafType uint8 - -const ( - // LeafTypeBalance specifies that leaf stores Balance - LeafTypeBalance leafType = 0 - // LeafTypeNonce specifies that leaf stores Nonce - LeafTypeNonce leafType = 1 - // LeafTypeCode specifies that leaf stores Code - LeafTypeCode leafType = 2 - // LeafTypeStorage specifies that leaf stores Storage Value - LeafTypeStorage leafType = 3 - // LeafTypeSCLength specifies that leaf stores Storage Value - LeafTypeSCLength leafType = 4 -) diff --git a/merkletree/split.go b/merkletree/split.go deleted file mode 100644 index e264807a5..000000000 --- a/merkletree/split.go +++ /dev/null @@ -1,96 +0,0 @@ -package merkletree - -import ( - "encoding/binary" - "fmt" - "math/big" - "strings" - - "github.com/0xPolygon/cdk/hex" -) - -// maxBigIntLen is 256 bits (32 bytes) -const maxBigIntLen = 32 - -// wordLength is the number of bits of each ff limb -const wordLength = 64 - -// scalar2fea splits a *big.Int into array of 32bit uint64 values. 
-func scalar2fea(value *big.Int) []uint64 { - val := make([]uint64, 8) //nolint:mnd - mask, _ := new(big.Int).SetString("FFFFFFFF", 16) //nolint:mnd - val[0] = new(big.Int).And(value, mask).Uint64() - val[1] = new(big.Int).And(new(big.Int).Rsh(value, 32), mask).Uint64() //nolint:mnd - val[2] = new(big.Int).And(new(big.Int).Rsh(value, 64), mask).Uint64() //nolint:mnd - val[3] = new(big.Int).And(new(big.Int).Rsh(value, 96), mask).Uint64() //nolint:mnd - val[4] = new(big.Int).And(new(big.Int).Rsh(value, 128), mask).Uint64() //nolint:mnd - val[5] = new(big.Int).And(new(big.Int).Rsh(value, 160), mask).Uint64() //nolint:mnd - val[6] = new(big.Int).And(new(big.Int).Rsh(value, 192), mask).Uint64() //nolint:mnd - val[7] = new(big.Int).And(new(big.Int).Rsh(value, 224), mask).Uint64() //nolint:mnd - return val -} - -// h4ToScalar converts array of 4 uint64 into a unique 256 bits scalar. -func h4ToScalar(h4 []uint64) *big.Int { - if len(h4) == 0 { - return new(big.Int) - } - result := new(big.Int).SetUint64(h4[0]) - - for i := 1; i < 4; i++ { - b2 := new(big.Int).SetUint64(h4[i]) - b2.Lsh(b2, uint(wordLength*i)) - result = result.Add(result, b2) - } - - return result -} - -// H4ToString converts array of 4 Scalars of 64 bits into an hex string. -func H4ToString(h4 []uint64) string { - sc := h4ToScalar(h4) - - return fmt.Sprintf("0x%064s", hex.EncodeToString(sc.Bytes())) -} - -// StringToh4 converts an hex string into array of 4 Scalars of 64 bits. 
-func StringToh4(str string) ([]uint64, error) { - str = strings.TrimPrefix(str, "0x") - - bi, ok := new(big.Int).SetString(str, hex.Base) - if !ok { - return nil, fmt.Errorf("could not convert %q into big int", str) - } - - return scalarToh4(bi), nil -} - -// scalarToh4 converts a *big.Int into an array of 4 uint64 -func scalarToh4(s *big.Int) []uint64 { - b := ScalarToFilledByteSlice(s) - - r := make([]uint64, 4) //nolint:mnd - - f, _ := hex.DecodeHex("0xFFFFFFFFFFFFFFFF") - fbe := binary.BigEndian.Uint64(f) - - r[3] = binary.BigEndian.Uint64(b[0:8]) & fbe - r[2] = binary.BigEndian.Uint64(b[8:16]) & fbe - r[1] = binary.BigEndian.Uint64(b[16:24]) & fbe - r[0] = binary.BigEndian.Uint64(b[24:]) & fbe - - return r -} - -// ScalarToFilledByteSlice converts a *big.Int into an array of maxBigIntLen -// bytes. -func ScalarToFilledByteSlice(s *big.Int) []byte { - buf := make([]byte, maxBigIntLen) - return s.FillBytes(buf) -} - -// h4ToFilledByteSlice converts an array of 4 uint64 into an array of -// maxBigIntLen bytes. 
-func h4ToFilledByteSlice(h4 []uint64) []byte { - return ScalarToFilledByteSlice(h4ToScalar(h4)) -} diff --git a/merkletree/split_test.go b/merkletree/split_test.go deleted file mode 100644 index eff615db7..000000000 --- a/merkletree/split_test.go +++ /dev/null @@ -1,276 +0,0 @@ -package merkletree - -import ( - "fmt" - "math/big" - "reflect" - "testing" - - "github.com/0xPolygon/cdk/hex" - "github.com/0xPolygon/cdk/test/testutils" - "github.com/stretchr/testify/require" -) - -func TestScalar2Fea(t *testing.T) { - tests := []struct { - name string - input string - expected []uint64 - }{ - { - name: "Zero value", - input: "0", - expected: []uint64{0, 0, 0, 0, 0, 0, 0, 0}, - }, - { - name: "Single 32-bit value", - input: "FFFFFFFF", - expected: []uint64{0xFFFFFFFF, 0, 0, 0, 0, 0, 0, 0}, - }, - { - name: "Mixed bits across chunks (128-bit)", - input: "1234567890ABCDEF1234567890ABCDEF", - expected: []uint64{0x90ABCDEF, 0x12345678, 0x90ABCDEF, 0x12345678, 0, 0, 0, 0}, - }, - { - name: "All bits set in each 32-bit chunk (256-bit)", - input: "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", - expected: []uint64{0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - inputVal, success := new(big.Int).SetString(tt.input, 16) - if !success { - t.Fatalf("Invalid input value: %s", tt.input) - } - - result := scalar2fea(inputVal) - - if !reflect.DeepEqual(result, tt.expected) { - t.Errorf("scalar2fea(%s) = %v, want %v", tt.input, result, tt.expected) - } - }) - } -} - -func Test_h4ToScalar(t *testing.T) { - tcs := []struct { - input []uint64 - expected string - }{ - { - input: []uint64{0, 0, 0, 0}, - expected: "0", - }, - { - input: []uint64{0, 1, 2, 3}, - expected: "18831305206160042292187933003464876175252262292329349513216", - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - actual 
:= h4ToScalar(tc.input) - expected, ok := new(big.Int).SetString(tc.expected, 10) - require.True(t, ok) - require.Equal(t, expected, actual) - }) - } -} - -func Test_scalarToh4(t *testing.T) { - tcs := []struct { - input string - expected []uint64 - }{ - { - input: "0", - expected: []uint64{0, 0, 0, 0}, - }, - { - input: "18831305206160042292187933003464876175252262292329349513216", - expected: []uint64{0, 1, 2, 3}, - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - bi, ok := new(big.Int).SetString(tc.input, 10) - require.True(t, ok) - - actual := scalarToh4(bi) - require.Equal(t, tc.expected, actual) - }) - } -} - -func Test_h4ToString(t *testing.T) { - tcs := []struct { - input []uint64 - expected string - }{ - { - input: []uint64{0, 0, 0, 0}, - expected: "0x0000000000000000000000000000000000000000000000000000000000000000", - }, - { - input: []uint64{0, 1, 2, 3}, - expected: "0x0000000000000003000000000000000200000000000000010000000000000000", - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - actual := H4ToString(tc.input) - require.Equal(t, tc.expected, actual) - }) - } -} - -func Test_Conversions(t *testing.T) { - tcs := []struct { - input []uint64 - }{ - { - input: []uint64{0, 0, 0, 0}, - }, - { - input: []uint64{0, 1, 2, 3}, - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - resScalar := h4ToScalar(tc.input) - init := scalarToh4(resScalar) - require.Equal(t, tc.input, init) - }) - } -} - -func Test_stringToh4(t *testing.T) { - tcs := []struct { - description string - input string - expected []uint64 - expectedErr bool - expectedErrMsg string - }{ - { - description: "happy path", - input: "cafe", - expected: []uint64{51966, 0, 0, 0}, - }, - { - description: "0x prefix is allowed", - input: "0xcafe", - expected: []uint64{51966, 0, 0, 0}, - }, - - { - description: "non hex input causes error", - 
input: "yu74", - expectedErr: true, - expectedErrMsg: "could not convert \"yu74\" into big int", - }, - { - description: "empty input causes error", - input: "", - expectedErr: true, - expectedErrMsg: "could not convert \"\" into big int", - }, - } - - for _, tc := range tcs { - tc := tc - t.Run(tc.description, func(t *testing.T) { - actual, err := StringToh4(tc.input) - require.NoError(t, testutils.CheckError(err, tc.expectedErr, tc.expectedErrMsg)) - - require.Equal(t, tc.expected, actual) - }) - } -} - -func Test_ScalarToFilledByteSlice(t *testing.T) { - tcs := []struct { - input string - expected string - }{ - { - input: "0", - expected: "0x0000000000000000000000000000000000000000000000000000000000000000", - }, - { - input: "256", - expected: "0x0000000000000000000000000000000000000000000000000000000000000100", - }, - { - input: "235938498573495379548793890390932048239042839490238", - expected: "0x0000000000000000000000a16f882ee8972432c0a71c5e309ad5f7215690aebe", - }, - { - input: "4309593458485959083095843905390485089430985490434080439904305093450934509490", - expected: "0x098724b9a1bc97eee674cf5b6b56b8fafd83ac49c3da1f2c87c822548bbfdfb2", - }, - { - input: "98999023430240239049320492430858334093493024832984092384902398409234090932489", - expected: "0xdadf762a31e865f150a1456d7db7963c91361b771c8381a3fb879cf5bf91b909", - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { - input, ok := big.NewInt(0).SetString(tc.input, 10) - require.True(t, ok) - - actualSlice := ScalarToFilledByteSlice(input) - - actual := hex.EncodeToHex(actualSlice) - - require.Equal(t, tc.expected, actual) - }) - } -} - -func Test_h4ToFilledByteSlice(t *testing.T) { - tcs := []struct { - input []uint64 - expected string - }{ - { - input: []uint64{0, 0, 0, 0}, - expected: "0x0000000000000000000000000000000000000000000000000000000000000000", - }, - { - input: []uint64{0, 1, 2, 3}, - expected: 
"0x0000000000000003000000000000000200000000000000010000000000000000", - }, - { - input: []uint64{55345354959, 991992992929, 2, 3}, - expected: "0x00000000000000030000000000000002000000e6f763d4a10000000ce2d718cf", - }, - { - input: []uint64{8398349845894398543, 3485942349435495945, 734034022234249459, 5490434584389534589}, - expected: "0x4c31f12a390ec37d0a2fd00ddc52d8f330608e18f597e609748ceeb03ffe024f", - }, - } - - for i, tc := range tcs { - tc := tc - t.Run(fmt.Sprintf("test case %d", i), func(t *testing.T) { - actualSlice := h4ToFilledByteSlice(tc.input) - - actual := hex.EncodeToHex(actualSlice) - - require.Equal(t, tc.expected, actual) - }) - } -} diff --git a/reorgdetector/config.go b/reorgdetector/config.go deleted file mode 100644 index c9a904153..000000000 --- a/reorgdetector/config.go +++ /dev/null @@ -1,29 +0,0 @@ -package reorgdetector - -import ( - "time" - - "github.com/0xPolygon/cdk/config/types" -) - -const ( - defaultCheckReorgsInterval = 2 * time.Second -) - -// Config is the configuration for the reorg detector -type Config struct { - // DBPath is the path to the database - DBPath string `mapstructure:"DBPath"` - - // CheckReorgsInterval is the interval to check for reorgs in tracked blocks - CheckReorgsInterval types.Duration `mapstructure:"CheckReorgsInterval"` -} - -// GetCheckReorgsInterval returns the interval to check for reorgs in tracked blocks -func (c *Config) GetCheckReorgsInterval() time.Duration { - if c.CheckReorgsInterval.Duration == 0 { - return defaultCheckReorgsInterval - } - - return c.CheckReorgsInterval.Duration -} diff --git a/reorgdetector/migrations/migrations.go b/reorgdetector/migrations/migrations.go deleted file mode 100644 index ba619cde6..000000000 --- a/reorgdetector/migrations/migrations.go +++ /dev/null @@ -1,21 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" -) - -//go:embed reorgdetector0001.sql -var mig001 string - -func 
RunMigrations(dbPath string) error { - migrations := []types.Migration{ - { - ID: "reorgdetector0001", - SQL: mig001, - }, - } - return db.RunMigrations(dbPath, migrations) -} diff --git a/reorgdetector/migrations/reorgdetector0001.sql b/reorgdetector/migrations/reorgdetector0001.sql deleted file mode 100644 index 8b5092baf..000000000 --- a/reorgdetector/migrations/reorgdetector0001.sql +++ /dev/null @@ -1,11 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS block; -DROP TABLE IF EXISTS claim; -DROP TABLE IF EXISTS bridge; - --- +migrate Up -CREATE TABLE tracked_block ( - subscriber_id VARCHAR NOT NULL, - num BIGINT NOT NULL, - hash VARCHAR NOT NULL -); \ No newline at end of file diff --git a/reorgdetector/mock_eth_client.go b/reorgdetector/mock_eth_client.go deleted file mode 100644 index 1a42104be..000000000 --- a/reorgdetector/mock_eth_client.go +++ /dev/null @@ -1,220 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package reorgdetector - -import ( - context "context" - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - ethereum "github.com/ethereum/go-ethereum" - - mock "github.com/stretchr/testify/mock" - - types "github.com/ethereum/go-ethereum/core/types" -) - -// EthClientMock is an autogenerated mock type for the EthClient type -type EthClientMock struct { - mock.Mock -} - -type EthClientMock_Expecter struct { - mock *mock.Mock -} - -func (_m *EthClientMock) EXPECT() *EthClientMock_Expecter { - return &EthClientMock_Expecter{mock: &_m.Mock} -} - -// HeaderByHash provides a mock function with given fields: ctx, hash -func (_m *EthClientMock) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for HeaderByHash") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, 
common.Hash) *types.Header); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClientMock_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' -type EthClientMock_HeaderByHash_Call struct { - *mock.Call -} - -// HeaderByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *EthClientMock_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *EthClientMock_HeaderByHash_Call { - return &EthClientMock_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} -} - -func (_c *EthClientMock_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthClientMock_HeaderByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *EthClientMock_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *EthClientMock_HeaderByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClientMock_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *EthClientMock_HeaderByHash_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *EthClientMock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != 
nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClientMock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type EthClientMock_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *EthClientMock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *EthClientMock_HeaderByNumber_Call { - return &EthClientMock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *EthClientMock_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *EthClientMock_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *EthClientMock_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *EthClientMock_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClientMock_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *EthClientMock_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *EthClientMock) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeNewHead") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { - r0 = rf(ctx, ch) - } else { - if ret.Get(0) != nil { - 
r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { - r1 = rf(ctx, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// EthClientMock_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' -type EthClientMock_SubscribeNewHead_Call struct { - *mock.Call -} - -// SubscribeNewHead is a helper method to define mock.On call -// - ctx context.Context -// - ch chan<- *types.Header -func (_e *EthClientMock_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *EthClientMock_SubscribeNewHead_Call { - return &EthClientMock_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} -} - -func (_c *EthClientMock_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *EthClientMock_SubscribeNewHead_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(chan<- *types.Header)) - }) - return _c -} - -func (_c *EthClientMock_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *EthClientMock_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EthClientMock_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *EthClientMock_SubscribeNewHead_Call { - _c.Call.Return(run) - return _c -} - -// NewEthClientMock creates a new instance of EthClientMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewEthClientMock(t interface { - mock.TestingT - Cleanup(func()) -}) *EthClientMock { - mock := &EthClientMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/reorgdetector/reorgdetector.go b/reorgdetector/reorgdetector.go deleted file mode 100644 index a25495914..000000000 --- a/reorgdetector/reorgdetector.go +++ /dev/null @@ -1,227 +0,0 @@ -package reorgdetector - -import ( - "context" - "database/sql" - "fmt" - "math/big" - "sync" - "time" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector/migrations" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" - "golang.org/x/sync/errgroup" -) - -type Network string - -const ( - L1 Network = "l1" - L2 Network = "l2" -) - -func (n Network) String() string { - return string(n) -} - -type EthClient interface { - SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) - HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) - HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) -} - -type ReorgDetector struct { - client EthClient - db *sql.DB - checkReorgInterval time.Duration - - trackedBlocksLock sync.RWMutex - trackedBlocks map[string]*headersList - - subscriptionsLock sync.RWMutex - subscriptions map[string]*Subscription - - log *log.Logger -} - -func New(client EthClient, cfg Config, network Network) (*ReorgDetector, error) { - err := migrations.RunMigrations(cfg.DBPath) - if err != nil { - return nil, err - } - db, err := db.NewSQLiteDB(cfg.DBPath) - if err != nil { - return nil, err - } - - return &ReorgDetector{ - client: client, - db: db, - checkReorgInterval: cfg.GetCheckReorgsInterval(), - trackedBlocks: make(map[string]*headersList), - subscriptions: make(map[string]*Subscription), - log: 
log.WithFields("reorg-detector", network.String()), - }, nil -} - -// Start starts the reorg detector -func (rd *ReorgDetector) Start(ctx context.Context) (err error) { - // Load tracked blocks from the DB - if err = rd.loadTrackedHeaders(); err != nil { - return fmt.Errorf("failed to load tracked headers: %w", err) - } - - // Continuously check reorgs in tracked by subscribers blocks - go func() { - ticker := time.NewTicker(rd.checkReorgInterval) - for { - select { - case <-ctx.Done(): - ticker.Stop() - return - case <-ticker.C: - if err = rd.detectReorgInTrackedList(ctx); err != nil { - log.Errorf("failed to detect reorg in tracked list: %v", err) - } - } - } - }() - - return nil -} - -// AddBlockToTrack adds a block to the tracked list for a subscriber -func (rd *ReorgDetector) AddBlockToTrack(ctx context.Context, id string, num uint64, hash common.Hash) error { - // Skip if the given block has already been stored - rd.trackedBlocksLock.RLock() - trackedBlocks, ok := rd.trackedBlocks[id] - if !ok { - rd.trackedBlocksLock.RUnlock() - return fmt.Errorf("subscriber %s is not subscribed", id) - } - rd.trackedBlocksLock.RUnlock() - - if existingHeader, err := trackedBlocks.get(num); err == nil && existingHeader.Hash == hash { - return nil - } - - // Store the given header to the tracked list - hdr := newHeader(num, hash) - if err := rd.saveTrackedBlock(id, hdr); err != nil { - return fmt.Errorf("failed to save tracked block: %w", err) - } - - return nil -} - -// detectReorgInTrackedList detects reorgs in the tracked blocks. 
-// Notifies subscribers if reorg has happened -func (rd *ReorgDetector) detectReorgInTrackedList(ctx context.Context) error { - // Get the latest finalized block - lastFinalisedBlock, err := rd.client.HeaderByNumber(ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return fmt.Errorf("failed to get the latest finalized block: %w", err) - } - - var ( - headersCacheLock sync.Mutex - headersCache = map[uint64]*types.Header{ - lastFinalisedBlock.Number.Uint64(): lastFinalisedBlock, - } - errGroup errgroup.Group - ) - - subscriberIDs := rd.getSubscriberIDs() - - for _, id := range subscriberIDs { - id := id - - // This is done like this because of a possible deadlock - // between AddBlocksToTrack and detectReorgInTrackedList - rd.trackedBlocksLock.RLock() - hdrs, ok := rd.trackedBlocks[id] - rd.trackedBlocksLock.RUnlock() - - if !ok { - continue - } - - rd.log.Debugf("Checking reorgs in tracked blocks up to block %d", lastFinalisedBlock.Number.Uint64()) - - errGroup.Go(func() error { - headers := hdrs.getSorted() - for _, hdr := range headers { - // Get the actual header from the network or from the cache - var err error - headersCacheLock.Lock() - currentHeader, ok := headersCache[hdr.Num] - if !ok || currentHeader == nil { - if currentHeader, err = rd.client.HeaderByNumber(ctx, new(big.Int).SetUint64(hdr.Num)); err != nil { - headersCacheLock.Unlock() - return fmt.Errorf("failed to get the header %d: %w", hdr.Num, err) - } - headersCache[hdr.Num] = currentHeader - } - headersCacheLock.Unlock() - - // Check if the block hash matches with the actual block hash - if hdr.Hash == currentHeader.Hash() { - // Delete block from the tracked blocks list if it is less than or equal to the last finalized block - // and hashes matches. If higher than finalized block, we assume a reorg still might happen. 
- if hdr.Num <= lastFinalisedBlock.Number.Uint64() { - hdrs.removeRange(hdr.Num, hdr.Num) - - if err := rd.removeTrackedBlockRange(id, hdr.Num, hdr.Num); err != nil { - return fmt.Errorf("error removing blocks from DB for subscriber %s between blocks %d and %d: %w", - id, hdr.Num, hdr.Num, err) - } - } - - continue - } - - // Notify the subscriber about the reorg - rd.notifySubscriber(id, hdr) - - // Remove the reorged block and all the following blocks from DB - if err := rd.removeTrackedBlockRange(id, hdr.Num, headers[len(headers)-1].Num); err != nil { - return fmt.Errorf("error removing blocks from DB for subscriber %s between blocks %d and %d: %w", - id, hdr.Num, headers[len(headers)-1].Num, err) - } - // Remove the reorged block and all the following blocks from memory - hdrs.removeRange(hdr.Num, headers[len(headers)-1].Num) - - break - } - return nil - }) - } - - return errGroup.Wait() -} - -// loadTrackedHeaders loads tracked headers from the DB and stores them in memory -func (rd *ReorgDetector) loadTrackedHeaders() (err error) { - rd.trackedBlocksLock.Lock() - defer rd.trackedBlocksLock.Unlock() - - // Load tracked blocks for all subscribers from the DB - if rd.trackedBlocks, err = rd.getTrackedBlocks(); err != nil { - return fmt.Errorf("failed to get tracked blocks: %w", err) - } - - // Go over tracked blocks and create subscription for each tracker - for id := range rd.trackedBlocks { - rd.subscriptions[id] = &Subscription{ - ReorgedBlock: make(chan uint64), - ReorgProcessed: make(chan bool), - } - } - - return nil -} diff --git a/reorgdetector/reorgdetector_db.go b/reorgdetector/reorgdetector_db.go deleted file mode 100644 index 72bc40a7c..000000000 --- a/reorgdetector/reorgdetector_db.go +++ /dev/null @@ -1,80 +0,0 @@ -package reorgdetector - -import ( - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" - "github.com/russross/meddler" -) - -// getTrackedBlocks returns a list of tracked blocks for each subscriber from db -func (rd *ReorgDetector) 
getTrackedBlocks() (map[string]*headersList, error) { - trackedBlocks := make(map[string]*headersList, 0) - var headersWithID []*headerWithSubscriberID - err := meddler.QueryAll(rd.db, &headersWithID, "SELECT * FROM tracked_block ORDER BY subscriber_id;") - if err != nil { - if errors.Is(err, db.ErrNotFound) { - return trackedBlocks, nil - } - return nil, fmt.Errorf("error queryng tracked_block: %w", err) - } - if len(headersWithID) == 0 { - return trackedBlocks, nil - } - currentID := headersWithID[0].SubscriberID - currentHeaders := []header{} - for i := 0; i < len(headersWithID); i++ { - if i == len(headersWithID)-1 { - currentHeaders = append(currentHeaders, header{ - Num: headersWithID[i].Num, - Hash: headersWithID[i].Hash, - }) - trackedBlocks[currentID] = newHeadersList(currentHeaders...) - } else if headersWithID[i].SubscriberID != currentID { - trackedBlocks[currentID] = newHeadersList(currentHeaders...) - currentHeaders = []header{{ - Num: headersWithID[i].Num, - Hash: headersWithID[i].Hash, - }} - currentID = headersWithID[i].SubscriberID - } else { - currentHeaders = append(currentHeaders, header{ - Num: headersWithID[i].Num, - Hash: headersWithID[i].Hash, - }) - } - } - - return trackedBlocks, nil -} - -// saveTrackedBlock saves the tracked block for a subscriber in db and in memory -func (rd *ReorgDetector) saveTrackedBlock(id string, b header) error { - rd.trackedBlocksLock.Lock() - hdrs, ok := rd.trackedBlocks[id] - if !ok || hdrs.isEmpty() { - hdrs = newHeadersList(b) - rd.trackedBlocks[id] = hdrs - } else { - hdrs.add(b) - } - - rd.log.Debugf("Tracking block %d for subscriber %s", b.Num, id) - - rd.trackedBlocksLock.Unlock() - return meddler.Insert(rd.db, "tracked_block", &headerWithSubscriberID{ - SubscriberID: id, - Num: b.Num, - Hash: b.Hash, - }) -} - -// updateTrackedBlocksDB updates the tracked blocks for a subscriber in db -func (rd *ReorgDetector) removeTrackedBlockRange(id string, fromBlock, toBlock uint64) error { - _, err := rd.db.Exec( 
- "DELETE FROM tracked_block WHERE num >= $1 AND num <= $2 AND subscriber_id = $3;", - fromBlock, toBlock, id, - ) - return err -} diff --git a/reorgdetector/reorgdetector_sub.go b/reorgdetector/reorgdetector_sub.go deleted file mode 100644 index 7ff04aea9..000000000 --- a/reorgdetector/reorgdetector_sub.go +++ /dev/null @@ -1,58 +0,0 @@ -package reorgdetector - -// Subscription is a subscription to reorg events -type Subscription struct { - ReorgedBlock chan uint64 - ReorgProcessed chan bool -} - -// Subscribe subscribes to reorg events -func (rd *ReorgDetector) Subscribe(id string) (*Subscription, error) { - rd.subscriptionsLock.Lock() - defer rd.subscriptionsLock.Unlock() - - if sub, ok := rd.subscriptions[id]; ok { - return sub, nil - } - - // Create a new subscription - sub := &Subscription{ - ReorgedBlock: make(chan uint64), - ReorgProcessed: make(chan bool), - } - rd.subscriptions[id] = sub - - // Create a new tracked blocks list for the subscriber - rd.trackedBlocksLock.Lock() - rd.trackedBlocks[id] = newHeadersList() - rd.trackedBlocksLock.Unlock() - - return sub, nil -} - -// notifySubscriber notifies the subscriber with the block of the reorg -func (rd *ReorgDetector) notifySubscriber(id string, startingBlock header) { - // Notify subscriber about this particular reorg - rd.subscriptionsLock.RLock() - sub, ok := rd.subscriptions[id] - rd.subscriptionsLock.RUnlock() - - if ok { - rd.log.Infof("Reorg detected for subscriber %s at block %d", id, startingBlock.Num) - sub.ReorgedBlock <- startingBlock.Num - <-sub.ReorgProcessed - } -} - -// getSubscriberIDs returns a list of subscriber IDs -func (rd *ReorgDetector) getSubscriberIDs() []string { - rd.subscriptionsLock.RLock() - defer rd.subscriptionsLock.RUnlock() - - ids := make([]string, 0, len(rd.subscriptions)) - for id := range rd.subscriptions { - ids = append(ids, id) - } - - return ids -} diff --git a/reorgdetector/reorgdetector_test.go b/reorgdetector/reorgdetector_test.go deleted file mode 100644 
index 788cf85a7..000000000 --- a/reorgdetector/reorgdetector_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package reorgdetector - -import ( - "context" - big "math/big" - "path" - "strings" - "sync" - "testing" - "time" - - cdktypes "github.com/0xPolygon/cdk/config/types" - common "github.com/ethereum/go-ethereum/common" - types "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/ethereum/go-ethereum/rpc" - "github.com/stretchr/testify/require" -) - -func Test_ReorgDetector(t *testing.T) { - const subID = "test" - - ctx := context.Background() - - // Simulated L1 - clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - - // Create test DB dir - testDir := path.Join(t.TempDir(), "reorgdetectorTest_ReorgDetector.sqlite") - - reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - - err = reorgDetector.Start(ctx) - require.NoError(t, err) - - reorgSub, err := reorgDetector.Subscribe(subID) - require.NoError(t, err) - - // Block 1 - header1, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) - require.NoError(t, err) - require.Equal(t, uint64(1), header1.Number.Uint64()) - err = reorgDetector.AddBlockToTrack(ctx, subID, header1.Number.Uint64(), header1.Hash()) // Adding block 1 - require.NoError(t, err) - - // Block 2 - header2, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) - require.NoError(t, err) - require.Equal(t, uint64(2), header2.Number.Uint64()) - err = reorgDetector.AddBlockToTrack(ctx, subID, header2.Number.Uint64(), header2.Hash()) // Adding block 1 - require.NoError(t, err) - - // Block 3 - header3Reorged, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) - require.NoError(t, err) - require.Equal(t, uint64(3), header3Reorged.Number.Uint64()) - err = reorgDetector.AddBlockToTrack(ctx, subID, header3Reorged.Number.Uint64(), 
header3Reorged.Hash()) // Adding block 3 - require.NoError(t, err) - - // Block 4 - header4Reorged, err := clientL1.Client().HeaderByHash(ctx, clientL1.Commit()) - require.Equal(t, uint64(4), header4Reorged.Number.Uint64()) - require.NoError(t, err) - err = reorgDetector.AddBlockToTrack(ctx, subID, header4Reorged.Number.Uint64(), header4Reorged.Hash()) // Adding block 4 - require.NoError(t, err) - - err = clientL1.Fork(header2.Hash()) // Reorg on block 2 (block 2 is still valid) - require.NoError(t, err) - - // Make sure that the new canonical chain is longer than the previous one so the reorg is visible to the detector - header3AfterReorg := clientL1.Commit() // Next block 3 after reorg on block 2 - require.NotEqual(t, header3Reorged.Hash(), header3AfterReorg) - header4AfterReorg := clientL1.Commit() // Block 4 - require.NotEqual(t, header4Reorged.Hash(), header4AfterReorg) - clientL1.Commit() // Block 5 - - // Expect reorg on added blocks 3 -> all further blocks should be removed - select { - case firstReorgedBlock := <-reorgSub.ReorgedBlock: - reorgSub.ReorgProcessed <- true - require.Equal(t, header3Reorged.Number.Uint64(), firstReorgedBlock) - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for reorg") - } - - // just wait a little for completion - time.Sleep(time.Second / 5) - - reorgDetector.trackedBlocksLock.Lock() - headersList, ok := reorgDetector.trackedBlocks[subID] - reorgDetector.trackedBlocksLock.Unlock() - require.True(t, ok) - require.Equal(t, 2, headersList.len()) // Only blocks 1 and 2 left - actualHeader1, err := headersList.get(1) - require.NoError(t, err) - require.Equal(t, header1.Hash(), actualHeader1.Hash) - actualHeader2, err := headersList.get(2) - require.NoError(t, err) - require.Equal(t, header2.Hash(), actualHeader2.Hash) -} - -func TestGetTrackedBlocks(t *testing.T) { - clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - testDir := path.Join(t.TempDir(), 
"reorgdetector_TestGetTrackedBlocks.sqlite") - reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - list, err := reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, len(list), 0) - - expectedList := make(map[string]*headersList) - headersMapFoo := make(map[uint64]header) - headerFoo2 := header{ - Num: 2, - Hash: common.HexToHash("foofoo"), - } - err = reorgDetector.saveTrackedBlock("foo", headerFoo2) - require.NoError(t, err) - headersMapFoo[2] = headerFoo2 - headerFoo3 := header{ - Num: 3, - Hash: common.HexToHash("foofoofoo"), - } - err = reorgDetector.saveTrackedBlock("foo", headerFoo3) - require.NoError(t, err) - headersMapFoo[3] = headerFoo3 - expectedList["foo"] = &headersList{ - headers: headersMapFoo, - } - list, err = reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, expectedList, list) - - headersMapBar := make(map[uint64]header) - headerBar2 := header{ - Num: 2, - Hash: common.HexToHash("BarBar"), - } - err = reorgDetector.saveTrackedBlock("Bar", headerBar2) - require.NoError(t, err) - headersMapBar[2] = headerBar2 - expectedList["Bar"] = &headersList{ - headers: headersMapBar, - } - list, err = reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, expectedList, list) - - require.NoError(t, reorgDetector.loadTrackedHeaders()) - _, ok := reorgDetector.subscriptions["foo"] - require.True(t, ok) - _, ok = reorgDetector.subscriptions["Bar"] - require.True(t, ok) -} - -func TestNotSubscribed(t *testing.T) { - clientL1 := simulated.NewBackend(nil, simulated.WithBlockGasLimit(10000000)) - testDir := path.Join(t.TempDir(), "reorgdetectorTestNotSubscribed.sqlite") - reorgDetector, err := New(clientL1.Client(), Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - err = 
reorgDetector.AddBlockToTrack(context.Background(), "foo", 1, common.Hash{}) - require.True(t, strings.Contains(err.Error(), "is not subscribed")) -} - -func TestDetectReorgs(t *testing.T) { - t.Parallel() - - ctx := context.Background() - syncerID := "test-syncer" - trackedBlock := &types.Header{Number: big.NewInt(9)} - - t.Run("Block not finalized", func(t *testing.T) { - t.Parallel() - - lastFinalizedBlock := &types.Header{Number: big.NewInt(8)} - client := NewEthClientMock(t) - client.On("HeaderByNumber", ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))).Return(lastFinalizedBlock, nil) - client.On("HeaderByNumber", ctx, trackedBlock.Number).Return(trackedBlock, nil) - - testDir := path.Join(t.TempDir(), "reorgdetectorTestDetectReorgs.sqlite") - reorgDetector, err := New(client, Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - - _, err = reorgDetector.Subscribe(syncerID) - require.NoError(t, err) - require.NoError(t, reorgDetector.AddBlockToTrack(ctx, syncerID, trackedBlock.Number.Uint64(), trackedBlock.Hash())) - - require.NoError(t, reorgDetector.detectReorgInTrackedList(ctx)) - - trackedBlocks, err := reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, 1, len(trackedBlocks)) - - syncerTrackedBlocks, ok := trackedBlocks[syncerID] - require.True(t, ok) - require.Equal(t, 1, syncerTrackedBlocks.len()) - }) - - t.Run("Block finalized", func(t *testing.T) { - t.Parallel() - - lastFinalizedBlock := trackedBlock - client := NewEthClientMock(t) - client.On("HeaderByNumber", ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))).Return(lastFinalizedBlock, nil) - - testDir := path.Join(t.TempDir(), "reorgdetectorTestDetectReorgs.sqlite") - reorgDetector, err := New(client, Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - - _, err = reorgDetector.Subscribe(syncerID) - require.NoError(t, err) - 
require.NoError(t, reorgDetector.AddBlockToTrack(ctx, syncerID, trackedBlock.Number.Uint64(), trackedBlock.Hash())) - - require.NoError(t, reorgDetector.detectReorgInTrackedList(ctx)) - - trackedBlocks, err := reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, 0, len(trackedBlocks)) - }) - - t.Run("Reorg happened", func(t *testing.T) { - t.Parallel() - - lastFinalizedBlock := &types.Header{Number: big.NewInt(5)} - reorgedTrackedBlock := &types.Header{Number: trackedBlock.Number, Extra: []byte("reorged")} // Different hash - - client := NewEthClientMock(t) - client.On("HeaderByNumber", ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))).Return(lastFinalizedBlock, nil) - client.On("HeaderByNumber", ctx, trackedBlock.Number).Return(reorgedTrackedBlock, nil) - - testDir := path.Join(t.TempDir(), "reorgdetectorTestDetectReorgs.sqlite") - reorgDetector, err := New(client, Config{DBPath: testDir, CheckReorgsInterval: cdktypes.NewDuration(time.Millisecond * 100)}, L1) - require.NoError(t, err) - - subscription, err := reorgDetector.Subscribe(syncerID) - require.NoError(t, err) - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - <-subscription.ReorgedBlock - subscription.ReorgProcessed <- true - - wg.Done() - }() - - require.NoError(t, reorgDetector.AddBlockToTrack(ctx, syncerID, trackedBlock.Number.Uint64(), trackedBlock.Hash())) - - require.NoError(t, reorgDetector.detectReorgInTrackedList(ctx)) - - wg.Wait() // we wait here to make sure the reorg is processed - - trackedBlocks, err := reorgDetector.getTrackedBlocks() - require.NoError(t, err) - require.Equal(t, 0, len(trackedBlocks)) // shouldn't be any since a reorg happened on that block - }) -} diff --git a/reorgdetector/types.go b/reorgdetector/types.go deleted file mode 100644 index 35a73513a..000000000 --- a/reorgdetector/types.go +++ /dev/null @@ -1,119 +0,0 @@ -package reorgdetector - -import ( - "sort" - "sync" - - "github.com/0xPolygon/cdk/db" - 
"github.com/ethereum/go-ethereum/common" -) - -type header struct { - Num uint64 `meddler:"num"` - Hash common.Hash `meddler:"hash,hash"` -} - -type headerWithSubscriberID struct { - SubscriberID string `meddler:"subscriber_id"` - Num uint64 `meddler:"num"` - Hash common.Hash `meddler:"hash,hash"` -} - -// newHeader returns a new instance of header -func newHeader(num uint64, hash common.Hash) header { - return header{ - Num: num, - Hash: hash, - } -} - -type headersList struct { - sync.RWMutex - headers map[uint64]header -} - -// newHeadersList returns a new instance of headersList -func newHeadersList(headers ...header) *headersList { - headersMap := make(map[uint64]header, len(headers)) - - for _, b := range headers { - headersMap[b.Num] = b - } - - return &headersList{ - headers: headersMap, - } -} - -// len returns the number of headers in the headers list -func (hl *headersList) len() int { - hl.RLock() - ln := len(hl.headers) - hl.RUnlock() - return ln -} - -// isEmpty returns true if the headers list is empty -func (hl *headersList) isEmpty() bool { - return hl.len() == 0 -} - -// add adds a header to the headers list -func (hl *headersList) add(h header) { - hl.Lock() - hl.headers[h.Num] = h - hl.Unlock() -} - -// copy returns a copy of the headers list -func (hl *headersList) copy() *headersList { - hl.RLock() - defer hl.RUnlock() - - headersMap := make(map[uint64]header, len(hl.headers)) - for k, v := range hl.headers { - headersMap[k] = v - } - - return &headersList{ - headers: headersMap, - } -} - -// get returns a header by block number -func (hl *headersList) get(num uint64) (*header, error) { - hl.RLock() - defer hl.RUnlock() - - if b, ok := hl.headers[num]; ok { - return &b, nil - } - - return nil, db.ErrNotFound -} - -// getSorted returns headers in sorted order -func (hl *headersList) getSorted() []header { - hl.RLock() - sortedBlocks := make([]header, 0, len(hl.headers)) - - for _, b := range hl.headers { - sortedBlocks = append(sortedBlocks, b) 
- } - hl.RUnlock() - - sort.Slice(sortedBlocks, func(i, j int) bool { - return sortedBlocks[i].Num < sortedBlocks[j].Num - }) - - return sortedBlocks -} - -// removeRange removes headers from "from" to "to" -func (hl *headersList) removeRange(from, to uint64) { - hl.Lock() - for i := from; i <= to; i++ { - delete(hl.headers, i) - } - hl.Unlock() -} diff --git a/reorgdetector/types_test.go b/reorgdetector/types_test.go deleted file mode 100644 index d2562078c..000000000 --- a/reorgdetector/types_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package reorgdetector - -import ( - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestBlockMap(t *testing.T) { - t.Parallel() - - // Create a new block map - bm := newHeadersList( - header{Num: 1, Hash: common.HexToHash("0x123")}, - header{Num: 2, Hash: common.HexToHash("0x456")}, - header{Num: 3, Hash: common.HexToHash("0x789")}, - ) - - t.Run("len", func(t *testing.T) { - t.Parallel() - - actualLen := bm.len() - expectedLen := 3 - if !reflect.DeepEqual(expectedLen, actualLen) { - t.Errorf("len() returned incorrect result, expected: %v, got: %v", expectedLen, actualLen) - } - }) - - t.Run("isEmpty", func(t *testing.T) { - t.Parallel() - - if bm.isEmpty() { - t.Error("isEmpty() returned incorrect result, expected: false, got: true") - } - }) - - t.Run("add", func(t *testing.T) { - t.Parallel() - - bm := bm.copy() - tba := header{Num: 4, Hash: common.HexToHash("0xabc")} - bm.add(tba) - if !reflect.DeepEqual(tba, bm.headers[4]) { - t.Errorf("add() returned incorrect result, expected: %v, got: %v", tba, bm.headers[4]) - } - }) - - t.Run("copy", func(t *testing.T) { - t.Parallel() - - copiedBm := bm.copy() - for i, header := range bm.headers { - copiedHeader, exists := copiedBm.headers[i] - require.True(t, exists) - if !reflect.DeepEqual(header, copiedHeader) { - t.Errorf("copy() returned incorrect result, expected: %v, got: %v", header, copiedHeader) - } - } - }) - - 
t.Run("get", func(t *testing.T) { - t.Parallel() - - header, err := bm.get(3) - require.NoError(t, err) - if !reflect.DeepEqual(*header, bm.headers[3]) { - t.Errorf("get() returned incorrect result, expected: %v, got: %v", header, bm.headers[3]) - } - }) - - t.Run("getSorted", func(t *testing.T) { - t.Parallel() - - sortedBlocks := bm.getSorted() - expectedSortedBlocks := []header{ - {Num: 1, Hash: common.HexToHash("0x123")}, - {Num: 2, Hash: common.HexToHash("0x456")}, - {Num: 3, Hash: common.HexToHash("0x789")}, - } - if !reflect.DeepEqual(sortedBlocks, expectedSortedBlocks) { - t.Errorf("getSorted() returned incorrect result, expected: %v, got: %v", expectedSortedBlocks, sortedBlocks) - } - }) - - t.Run("removeRange", func(t *testing.T) { - t.Parallel() - - bm := newHeadersList( - header{Num: 1, Hash: common.HexToHash("0x123")}, - header{Num: 2, Hash: common.HexToHash("0x456")}, - header{Num: 3, Hash: common.HexToHash("0x789")}, - header{Num: 4, Hash: common.HexToHash("0xabc")}, - header{Num: 5, Hash: common.HexToHash("0xdef")}, - ) - - bm.removeRange(3, 5) - - expectedBlocks := []header{ - {Num: 1, Hash: common.HexToHash("0x123")}, - {Num: 2, Hash: common.HexToHash("0x456")}, - } - - sortedBlocks := bm.getSorted() - - if !reflect.DeepEqual(sortedBlocks, expectedBlocks) { - t.Errorf("removeRange() failed, expected: %v, got: %v", expectedBlocks, sortedBlocks) - } - }) -} diff --git a/rpc/batch.go b/rpc/batch.go index bd75a3748..6ee4eeb3d 100644 --- a/rpc/batch.go +++ b/rpc/batch.go @@ -7,9 +7,9 @@ import ( "math/big" "github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/state" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ) diff --git a/rpc/bridge.go b/rpc/bridge.go deleted file mode 100644 index 65d949710..000000000 --- a/rpc/bridge.go +++ /dev/null @@ -1,364 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - 
"github.com/0xPolygon/cdk-rpc/rpc" - "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/rpc/types" - tree "github.com/0xPolygon/cdk/tree/types" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" -) - -const ( - // BRIDGE is the namespace of the bridge service - BRIDGE = "bridge" - meterName = "github.com/0xPolygon/cdk/rpc" - - zeroHex = "0x0" - binnarySearchDivider = 2 -) - -var ( - ErrNotOnL1Info = errors.New("this bridge has not been included on the L1 Info Tree yet") -) - -// BridgeEndpoints contains implementations for the "bridge" RPC endpoints -type BridgeEndpoints struct { - logger *log.Logger - meter metric.Meter - readTimeout time.Duration - writeTimeout time.Duration - networkID uint32 - sponsor ClaimSponsorer - l1InfoTree L1InfoTreer - injectedGERs LastGERer - bridgeL1 Bridger - bridgeL2 Bridger -} - -// NewBridgeEndpoints returns InteropEndpoints -func NewBridgeEndpoints( - logger *log.Logger, - writeTimeout time.Duration, - readTimeout time.Duration, - networkID uint32, - sponsor ClaimSponsorer, - l1InfoTree L1InfoTreer, - injectedGERs LastGERer, - bridgeL1 Bridger, - bridgeL2 Bridger, -) *BridgeEndpoints { - meter := otel.Meter(meterName) - return &BridgeEndpoints{ - logger: logger, - meter: meter, - readTimeout: readTimeout, - writeTimeout: writeTimeout, - networkID: networkID, - sponsor: sponsor, - l1InfoTree: l1InfoTree, - injectedGERs: injectedGERs, - bridgeL1: bridgeL1, - bridgeL2: bridgeL2, - } -} - -// L1InfoTreeIndexForBridge returns the first L1 Info Tree index in which the bridge was included. -// networkID represents the origin network. 
-// This call needs to be done to a client of the same network were the bridge tx was sent -func (b *BridgeEndpoints) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (interface{}, rpc.Error) { - ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout) - defer cancel() - - c, merr := b.meter.Int64Counter("l1_info_tree_index_for_bridge") - if merr != nil { - b.logger.Warnf("failed to create l1_info_tree_index_for_bridge counter: %s", merr) - } - c.Add(ctx, 1) - - if networkID == 0 { - l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL1Bridge(ctx, depositCount) - // TODO: special treatment of the error when not found, - // as it's expected that it will take some time for the L1 Info tree to be updated - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( - "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), - ) - } - return l1InfoTreeIndex, nil - } - if networkID == b.networkID { - l1InfoTreeIndex, err := b.getFirstL1InfoTreeIndexForL2Bridge(ctx, depositCount) - // TODO: special treatment of the error when not found, - // as it's expected that it will take some time for the L1 Info tree to be updated - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf( - "failed to get l1InfoTreeIndex for networkID %d and deposit count %d, error: %s", networkID, depositCount, err), - ) - } - return l1InfoTreeIndex, nil - } - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("this client does not support network %d", networkID), - ) -} - -// InjectedInfoAfterIndex return the first GER injected onto the network that is linked -// to the given index or greater. 
This call is useful to understand when a bridge is ready to be claimed -// on its destination network -func (b *BridgeEndpoints) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (interface{}, rpc.Error) { - ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout) - defer cancel() - - c, merr := b.meter.Int64Counter("injected_info_after_index") - if merr != nil { - b.logger.Warnf("failed to create injected_info_after_index counter: %s", merr) - } - c.Add(ctx, 1) - - if networkID == 0 { - info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) - } - return info, nil - } - if networkID == b.networkID { - e, err := b.injectedGERs.GetFirstGERAfterL1InfoTreeIndex(ctx, l1InfoTreeIndex) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) - } - info, err := b.l1InfoTree.GetInfoByIndex(ctx, e.L1InfoTreeIndex) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get global exit root, error: %s", err)) - } - return info, nil - } - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("this client does not support network %d", networkID), - ) -} - -// GetProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin -// while globalExitRoot should be already injected on the destination network. 
-// This call needs to be done to a client of the same network were the bridge tx was sent -func (b *BridgeEndpoints) GetProof( - networkID uint32, depositCount uint32, l1InfoTreeIndex uint32, -) (interface{}, rpc.Error) { - ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout) - defer cancel() - - c, merr := b.meter.Int64Counter("claim_proof") - if merr != nil { - b.logger.Warnf("failed to create claim_proof counter: %s", merr) - } - c.Add(ctx, 1) - - info, err := b.l1InfoTree.GetInfoByIndex(ctx, l1InfoTreeIndex) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get info from the tree: %s", err)) - } - proofRollupExitRoot, err := b.l1InfoTree.GetRollupExitTreeMerkleProof(ctx, networkID, info.GlobalExitRoot) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get rollup exit proof, error: %s", err)) - } - var proofLocalExitRoot tree.Proof - switch { - case networkID == 0: - proofLocalExitRoot, err = b.bridgeL1.GetProof(ctx, depositCount, info.MainnetExitRoot) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get local exit proof, error: %s", err)) - } - - case networkID == b.networkID: - localExitRoot, err := b.l1InfoTree.GetLocalExitRoot(ctx, networkID, info.RollupExitRoot) - if err != nil { - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("failed to get local exit root from rollup exit tree, error: %s", err), - ) - } - proofLocalExitRoot, err = b.bridgeL2.GetProof(ctx, depositCount, localExitRoot) - if err != nil { - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("failed to get local exit proof, error: %s", err), - ) - } - - default: - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("this client does not support network %d", networkID), - ) - } - return types.ClaimProof{ - ProofLocalExitRoot: proofLocalExitRoot, - ProofRollupExitRoot: 
proofRollupExitRoot, - L1InfoTreeLeaf: *info, - }, nil -} - -// SponsorClaim sends a claim tx on behalf of the user. -// This call needs to be done to a client of the same network were the claim is going to be sent (bridge destination) -func (b *BridgeEndpoints) SponsorClaim(claim claimsponsor.Claim) (interface{}, rpc.Error) { - ctx, cancel := context.WithTimeout(context.Background(), b.writeTimeout) - defer cancel() - - c, merr := b.meter.Int64Counter("sponsor_claim") - if merr != nil { - b.logger.Warnf("failed to create sponsor_claim counter: %s", merr) - } - c.Add(ctx, 1) - - if b.sponsor == nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring") - } - if claim.DestinationNetwork != b.networkID { - return zeroHex, rpc.NewRPCError( - rpc.DefaultErrorCode, - fmt.Sprintf("this client only sponsors claims for network %d", b.networkID), - ) - } - if err := b.sponsor.AddClaimToQueue(&claim); err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("error adding claim to the queue %s", err)) - } - return nil, nil -} - -// GetSponsoredClaimStatus returns the status of a claim that has been previously requested to be sponsored. 
-// This call needs to be done to the same client were it was requested to be sponsored -func (b *BridgeEndpoints) GetSponsoredClaimStatus(globalIndex *big.Int) (interface{}, rpc.Error) { - ctx, cancel := context.WithTimeout(context.Background(), b.readTimeout) - defer cancel() - - c, merr := b.meter.Int64Counter("get_sponsored_claim_status") - if merr != nil { - b.logger.Warnf("failed to create get_sponsored_claim_status counter: %s", merr) - } - c.Add(ctx, 1) - - if b.sponsor == nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, "this client does not support claim sponsoring") - } - claim, err := b.sponsor.GetClaim(globalIndex) - if err != nil { - return zeroHex, rpc.NewRPCError(rpc.DefaultErrorCode, fmt.Sprintf("failed to get claim status, error: %s", err)) - } - return claim.Status, nil -} - -func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL1Bridge(ctx context.Context, depositCount uint32) (uint32, error) { - lastInfo, err := b.l1InfoTree.GetLastInfo() - if err != nil { - return 0, err - } - - root, err := b.bridgeL1.GetRootByLER(ctx, lastInfo.MainnetExitRoot) - if err != nil { - return 0, err - } - if root.Index < depositCount { - return 0, ErrNotOnL1Info - } - - firstInfo, err := b.l1InfoTree.GetFirstInfo() - if err != nil { - return 0, err - } - - // Binary search between the first and last blcoks where L1 info tree was updated. 
- // Find the smallest l1 info tree index that is greater than depositCount and matches with - // a MER that is included on the l1 info tree - bestResult := lastInfo - lowerLimit := firstInfo.BlockNumber - upperLimit := lastInfo.BlockNumber - for lowerLimit <= upperLimit { - targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) - targetInfo, err := b.l1InfoTree.GetFirstInfoAfterBlock(targetBlock) - if err != nil { - return 0, err - } - root, err := b.bridgeL1.GetRootByLER(ctx, targetInfo.MainnetExitRoot) - if err != nil { - return 0, err - } - if root.Index < depositCount { - lowerLimit = targetBlock + 1 - } else if root.Index == depositCount { - bestResult = targetInfo - break - } else { - bestResult = targetInfo - upperLimit = targetBlock - 1 - } - } - - return bestResult.L1InfoTreeIndex, nil -} - -func (b *BridgeEndpoints) getFirstL1InfoTreeIndexForL2Bridge(ctx context.Context, depositCount uint32) (uint32, error) { - // NOTE: this code assumes that all the rollup exit roots - // (produced by the smart contract call verifyBatches / verifyBatchesTrustedAggregator) - // are included in the L1 info tree. As per the current implementation (smart contracts) of the protocol - // this is true. This could change in the future - lastVerified, err := b.l1InfoTree.GetLastVerifiedBatches(b.networkID - 1) - if err != nil { - return 0, err - } - - root, err := b.bridgeL2.GetRootByLER(ctx, lastVerified.ExitRoot) - if err != nil { - return 0, err - } - if root.Index < depositCount { - return 0, ErrNotOnL1Info - } - - firstVerified, err := b.l1InfoTree.GetFirstVerifiedBatches(b.networkID - 1) - if err != nil { - return 0, err - } - - // Binary search between the first and last blcoks where batches were verified. 
- // Find the smallest deposit count that is greater than depositCount and matches with - // a LER that is verified - bestResult := lastVerified - lowerLimit := firstVerified.BlockNumber - upperLimit := lastVerified.BlockNumber - for lowerLimit <= upperLimit { - targetBlock := lowerLimit + ((upperLimit - lowerLimit) / binnarySearchDivider) - targetVerified, err := b.l1InfoTree.GetFirstVerifiedBatchesAfterBlock(b.networkID-1, targetBlock) - if err != nil { - return 0, err - } - root, err = b.bridgeL2.GetRootByLER(ctx, targetVerified.ExitRoot) - if err != nil { - return 0, err - } - if root.Index < depositCount { - lowerLimit = targetBlock + 1 - } else if root.Index == depositCount { - bestResult = targetVerified - break - } else { - bestResult = targetVerified - upperLimit = targetBlock - 1 - } - } - - info, err := b.l1InfoTree.GetFirstL1InfoWithRollupExitRoot(bestResult.RollupExitRoot) - if err != nil { - return 0, err - } - return info.L1InfoTreeIndex, nil -} diff --git a/rpc/bridge_interfaces.go b/rpc/bridge_interfaces.go deleted file mode 100644 index bf6721ea7..000000000 --- a/rpc/bridge_interfaces.go +++ /dev/null @@ -1,41 +0,0 @@ -package rpc - -import ( - "context" - "math/big" - - "github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/lastgersync" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -type Bridger interface { - GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (tree.Proof, error) - GetRootByLER(ctx context.Context, ler common.Hash) (*tree.Root, error) -} - -type LastGERer interface { - GetFirstGERAfterL1InfoTreeIndex( - ctx context.Context, atOrAfterL1InfoTreeIndex uint32, - ) (lastgersync.Event, error) -} - -type L1InfoTreer interface { - GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) - GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) 
(tree.Proof, error) - GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) - GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) - GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) - GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) - GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) - GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) - GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) - GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) -} - -type ClaimSponsorer interface { - AddClaimToQueue(claim *claimsponsor.Claim) error - GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) -} diff --git a/rpc/bridge_test.go b/rpc/bridge_test.go deleted file mode 100644 index 9d461a50a..000000000 --- a/rpc/bridge_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package rpc - -import ( - "context" - "errors" - "testing" - - cdkCommon "github.com/0xPolygon/cdk/common" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - mocks "github.com/0xPolygon/cdk/rpc/mocks" - tree "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestGetFirstL1InfoTreeIndexForL1Bridge(t *testing.T) { - type testCase struct { - description string - setupMocks func() - depositCount uint32 - expectedIndex uint32 - expectedErr error - } - ctx := context.Background() - b := newBridgeWithMocks(t) - fooErr := errors.New("foo") - firstL1Info := &l1infotreesync.L1InfoTreeLeaf{ - BlockNumber: 10, - MainnetExitRoot: common.HexToHash("alfa"), - } - lastL1Info := &l1infotreesync.L1InfoTreeLeaf{ - BlockNumber: 1000, - MainnetExitRoot: common.HexToHash("alfa"), - } - mockHappyPath := func() { - // to make this work, assume that 
block number == l1 info tree index == deposit count - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.l1InfoTree.On("GetFirstInfo"). - Return(firstL1Info, nil). - Once() - infoAfterBlock := &l1infotreesync.L1InfoTreeLeaf{} - b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). - Run(func(args mock.Arguments) { - blockNum, ok := args.Get(0).(uint64) - require.True(t, ok) - infoAfterBlock.L1InfoTreeIndex = uint32(blockNum) - infoAfterBlock.BlockNumber = blockNum - infoAfterBlock.MainnetExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) - }). - Return(infoAfterBlock, nil) - rootByLER := &tree.Root{} - b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). - Run(func(args mock.Arguments) { - ler, ok := args.Get(1).(common.Hash) - require.True(t, ok) - index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 - if ler == common.HexToHash("alfa") { - index = uint32(lastL1Info.BlockNumber) - } - rootByLER.Index = index - }). - Return(rootByLER, nil) - } - testCases := []testCase{ - { - description: "error on GetLastInfo", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on first GetRootByLER", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). - Return(&tree.Root{}, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "not included yet", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). - Return(&tree.Root{Index: 10}, nil). 
- Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: ErrNotOnL1Info, - }, - { - description: "error on GetFirstInfo", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstInfo"). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on GetFirstInfoAfterBlock", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstInfo"). - Return(firstL1Info, nil). - Once() - b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on GetRootByLER (inside binnary search)", - setupMocks: func() { - b.l1InfoTree.On("GetLastInfo"). - Return(lastL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, lastL1Info.MainnetExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstInfo"). - Return(firstL1Info, nil). - Once() - b.l1InfoTree.On("GetFirstInfoAfterBlock", mock.Anything). - Return(firstL1Info, nil). - Once() - b.bridgeL1.On("GetRootByLER", ctx, mock.Anything). - Return(&tree.Root{}, fooErr). 
- Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "happy path 1", - setupMocks: mockHappyPath, - depositCount: 10, - expectedIndex: 10, - expectedErr: nil, - }, - { - description: "happy path 2", - setupMocks: mockHappyPath, - depositCount: 11, - expectedIndex: 11, - expectedErr: nil, - }, - { - description: "happy path 3", - setupMocks: mockHappyPath, - depositCount: 333, - expectedIndex: 333, - expectedErr: nil, - }, - { - description: "happy path 4", - setupMocks: mockHappyPath, - depositCount: 420, - expectedIndex: 420, - expectedErr: nil, - }, - { - description: "happy path 5", - setupMocks: mockHappyPath, - depositCount: 69, - expectedIndex: 69, - expectedErr: nil, - }, - } - - for _, tc := range testCases { - log.Debugf("running test case: %s", tc.description) - tc.setupMocks() - actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL1Bridge(ctx, tc.depositCount) - require.Equal(t, tc.expectedErr, err) - require.Equal(t, tc.expectedIndex, actualIndex) - } -} - -func TestGetFirstL1InfoTreeIndexForL2Bridge(t *testing.T) { - type testCase struct { - description string - setupMocks func() - depositCount uint32 - expectedIndex uint32 - expectedErr error - } - ctx := context.Background() - b := newBridgeWithMocks(t) - fooErr := errors.New("foo") - firstVerified := &l1infotreesync.VerifyBatches{ - BlockNumber: 10, - ExitRoot: common.HexToHash("alfa"), - } - lastVerified := &l1infotreesync.VerifyBatches{ - BlockNumber: 1000, - ExitRoot: common.HexToHash("alfa"), - } - mockHappyPath := func() { - // to make this work, assume that block number == l1 info tree index == deposit count - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). - Return(firstVerified, nil). - Once() - verifiedAfterBlock := &l1infotreesync.VerifyBatches{} - b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). 
- Run(func(args mock.Arguments) { - blockNum, ok := args.Get(1).(uint64) - require.True(t, ok) - verifiedAfterBlock.BlockNumber = blockNum - verifiedAfterBlock.ExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) - verifiedAfterBlock.RollupExitRoot = common.BytesToHash(cdkCommon.Uint32ToBytes(uint32(blockNum))) - }). - Return(verifiedAfterBlock, nil) - rootByLER := &tree.Root{} - b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). - Run(func(args mock.Arguments) { - ler, ok := args.Get(1).(common.Hash) - require.True(t, ok) - index := cdkCommon.BytesToUint32(ler.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 - if ler == common.HexToHash("alfa") { - index = uint32(lastVerified.BlockNumber) - } - rootByLER.Index = index - }). - Return(rootByLER, nil) - info := &l1infotreesync.L1InfoTreeLeaf{} - b.l1InfoTree.On("GetFirstL1InfoWithRollupExitRoot", mock.Anything). - Run(func(args mock.Arguments) { - exitRoot, ok := args.Get(0).(common.Hash) - require.True(t, ok) - index := cdkCommon.BytesToUint32(exitRoot.Bytes()[28:]) // hash is 32 bytes, uint32 is just 4 - info.L1InfoTreeIndex = index - }). - Return(info, nil). - Once() - } - testCases := []testCase{ - { - description: "error on GetLastVerified", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on first GetRootByLER", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). - Return(&tree.Root{}, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "not included yet", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). 
- Return(&tree.Root{Index: 10}, nil). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: ErrNotOnL1Info, - }, - { - description: "error on GetFirstVerified", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on GetFirstVerifiedBatchesAfterBlock", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). - Return(firstVerified, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). - Return(nil, fooErr). - Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "error on GetRootByLER (inside binnary search)", - setupMocks: func() { - b.l1InfoTree.On("GetLastVerifiedBatches", uint32(1)). - Return(lastVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, lastVerified.ExitRoot). - Return(&tree.Root{Index: 13}, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatches", uint32(1)). - Return(firstVerified, nil). - Once() - b.l1InfoTree.On("GetFirstVerifiedBatchesAfterBlock", uint32(1), mock.Anything). - Return(firstVerified, nil). - Once() - b.bridgeL2.On("GetRootByLER", ctx, mock.Anything). - Return(&tree.Root{}, fooErr). 
- Once() - }, - depositCount: 11, - expectedIndex: 0, - expectedErr: fooErr, - }, - { - description: "happy path 1", - setupMocks: mockHappyPath, - depositCount: 10, - expectedIndex: 10, - expectedErr: nil, - }, - { - description: "happy path 2", - setupMocks: mockHappyPath, - depositCount: 11, - expectedIndex: 11, - expectedErr: nil, - }, - { - description: "happy path 3", - setupMocks: mockHappyPath, - depositCount: 333, - expectedIndex: 333, - expectedErr: nil, - }, - { - description: "happy path 4", - setupMocks: mockHappyPath, - depositCount: 420, - expectedIndex: 420, - expectedErr: nil, - }, - { - description: "happy path 5", - setupMocks: mockHappyPath, - depositCount: 69, - expectedIndex: 69, - expectedErr: nil, - }, - } - - for _, tc := range testCases { - log.Debugf("running test case: %s", tc.description) - tc.setupMocks() - actualIndex, err := b.bridge.getFirstL1InfoTreeIndexForL2Bridge(ctx, tc.depositCount) - require.Equal(t, tc.expectedErr, err) - require.Equal(t, tc.expectedIndex, actualIndex) - } -} - -type bridgeWithMocks struct { - bridge *BridgeEndpoints - sponsor *mocks.ClaimSponsorer - l1InfoTree *mocks.L1InfoTreer - injectedGERs *mocks.LastGERer - bridgeL1 *mocks.Bridger - bridgeL2 *mocks.Bridger -} - -func newBridgeWithMocks(t *testing.T) bridgeWithMocks { - t.Helper() - b := bridgeWithMocks{ - sponsor: mocks.NewClaimSponsorer(t), - l1InfoTree: mocks.NewL1InfoTreer(t), - injectedGERs: mocks.NewLastGERer(t), - bridgeL1: mocks.NewBridger(t), - bridgeL2: mocks.NewBridger(t), - } - logger := log.WithFields("module", "bridgerpc") - b.bridge = NewBridgeEndpoints( - logger, 0, 0, 2, b.sponsor, b.l1InfoTree, b.injectedGERs, b.bridgeL1, b.bridgeL2, - ) - return b -} diff --git a/rpc/client/bridge.go b/rpc/client/bridge.go deleted file mode 100644 index f67907f27..000000000 --- a/rpc/client/bridge.go +++ /dev/null @@ -1,94 +0,0 @@ -package rpc - -import ( - "encoding/json" - "fmt" - "math/big" - - "github.com/0xPolygon/cdk-rpc/rpc" - 
"github.com/0xPolygon/cdk/claimsponsor" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/rpc/types" -) - -type BridgeClientInterface interface { - L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) - InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) - ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) - SponsorClaim(claim claimsponsor.Claim) error - GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) -} - -// L1InfoTreeIndexForBridge returns the first L1 Info Tree index in which the bridge was included. -// networkID represents the origin network. -// This call needs to be done to a client of the same network were the bridge tx was sent -func (c *Client) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { - response, err := rpc.JSONRPCCall(c.url, "bridge_l1InfoTreeIndexForBridge", networkID, depositCount) - if err != nil { - return 0, err - } - if response.Error != nil { - return 0, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) - } - var result uint32 - return result, json.Unmarshal(response.Result, &result) -} - -// InjectedInfoAfterIndex return the first GER injected onto the network that is linked -// to the given index or greater. 
This call is useful to understand when a bridge is ready to be claimed -// on its destination network -func (c *Client) InjectedInfoAfterIndex( - networkID uint32, l1InfoTreeIndex uint32, -) (*l1infotreesync.L1InfoTreeLeaf, error) { - response, err := rpc.JSONRPCCall(c.url, "bridge_injectedInfoAfterIndex", networkID, l1InfoTreeIndex) - if err != nil { - return nil, err - } - if response.Error != nil { - return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) - } - var result l1infotreesync.L1InfoTreeLeaf - return &result, json.Unmarshal(response.Result, &result) -} - -// ClaimProof returns the proofs needed to claim a bridge. NetworkID and depositCount refere to the bridge origin -// while globalExitRoot should be already injected on the destination network. -// This call needs to be done to a client of the same network were the bridge tx was sent -func (c *Client) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { - response, err := rpc.JSONRPCCall(c.url, "bridge_claimProof", networkID, depositCount, l1InfoTreeIndex) - if err != nil { - return nil, err - } - if response.Error != nil { - return nil, fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) - } - var result types.ClaimProof - return &result, json.Unmarshal(response.Result, &result) -} - -// SponsorClaim sends a claim tx on behalf of the user. -// This call needs to be done to a client of the same network were the claim is going to be sent (bridge destination) -func (c *Client) SponsorClaim(claim claimsponsor.Claim) error { - response, err := rpc.JSONRPCCall(c.url, "bridge_sponsorClaim", claim) - if err != nil { - return err - } - if response.Error != nil { - return fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) - } - return nil -} - -// GetSponsoredClaimStatus returns the status of a claim that has been previously requested to be sponsored. 
-// This call needs to be done to the same client were it was requested to be sponsored -func (c *Client) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { - response, err := rpc.JSONRPCCall(c.url, "bridge_getSponsoredClaimStatus", globalIndex) - if err != nil { - return "", err - } - if response.Error != nil { - return "", fmt.Errorf("%v %v", response.Error.Code, response.Error.Message) - } - var result claimsponsor.ClaimStatus - return result, json.Unmarshal(response.Result, &result) -} diff --git a/rpc/client/client.go b/rpc/client/client.go deleted file mode 100644 index b48fca519..000000000 --- a/rpc/client/client.go +++ /dev/null @@ -1,31 +0,0 @@ -package rpc - -// ClientInterface is the interface that defines the implementation of all the endpoints -type ClientInterface interface { - BridgeClientInterface -} - -// ClientFactoryInterface interface for the client factory -type ClientFactoryInterface interface { - NewClient(url string) ClientInterface -} - -// ClientFactory is the implementation of the data committee client factory -type ClientFactory struct{} - -// NewClient returns an implementation of the data committee node client -func (f *ClientFactory) NewClient(url string) ClientInterface { - return NewClient(url) -} - -// Client wraps all the available endpoints of the data abailability committee node server -type Client struct { - url string -} - -// NewClient returns a client ready to be used -func NewClient(url string) *Client { - return &Client{ - url: url, - } -} diff --git a/rpc/mocks/bridge_client_interface.go b/rpc/mocks/bridge_client_interface.go deleted file mode 100644 index 4c5200e4d..000000000 --- a/rpc/mocks/bridge_client_interface.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - big "math/big" - - claimsponsor "github.com/0xPolygon/cdk/claimsponsor" - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/rpc/types" -) - -// BridgeClientInterface is an autogenerated mock type for the BridgeClientInterface type -type BridgeClientInterface struct { - mock.Mock -} - -type BridgeClientInterface_Expecter struct { - mock *mock.Mock -} - -func (_m *BridgeClientInterface) EXPECT() *BridgeClientInterface_Expecter { - return &BridgeClientInterface_Expecter{mock: &_m.Mock} -} - -// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex -func (_m *BridgeClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { - ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) - - if len(ret) == 0 { - panic("no return value specified for ClaimProof") - } - - var r0 *types.ClaimProof - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { - return rf(networkID, depositCount, l1InfoTreeIndex) - } - if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { - r0 = rf(networkID, depositCount, l1InfoTreeIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ClaimProof) - } - } - - if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { - r1 = rf(networkID, depositCount, l1InfoTreeIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BridgeClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' -type BridgeClientInterface_ClaimProof_Call struct { - *mock.Call -} - -// ClaimProof is a helper method to define mock.On call -// - networkID uint32 -// - depositCount uint32 -// - l1InfoTreeIndex uint32 -func (_e *BridgeClientInterface_Expecter) ClaimProof(networkID interface{}, 
depositCount interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_ClaimProof_Call { - return &BridgeClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} -} - -func (_c *BridgeClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_ClaimProof_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) - }) - return _c -} - -func (_c *BridgeClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *BridgeClientInterface_ClaimProof_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *BridgeClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *BridgeClientInterface_ClaimProof_Call { - _c.Call.Return(run) - return _c -} - -// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex -func (_m *BridgeClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { - ret := _m.Called(globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetSponsoredClaimStatus") - } - - var r0 claimsponsor.ClaimStatus - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { - return rf(globalIndex) - } - if rf, ok := ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { - r0 = rf(globalIndex) - } else { - r0 = ret.Get(0).(claimsponsor.ClaimStatus) - } - - if rf, ok := ret.Get(1).(func(*big.Int) error); ok { - r1 = rf(globalIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BridgeClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' -type BridgeClientInterface_GetSponsoredClaimStatus_Call struct { - *mock.Call -} - -// GetSponsoredClaimStatus is a helper method to define mock.On call 
-// - globalIndex *big.Int -func (_e *BridgeClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *BridgeClientInterface_GetSponsoredClaimStatus_Call { - return &BridgeClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} -} - -func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*big.Int)) - }) - return _c -} - -func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *BridgeClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *BridgeClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *BridgeClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Return(run) - return _c -} - -// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex -func (_m *BridgeClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(networkID, l1InfoTreeIndex) - - if len(ret) == 0 { - panic("no return value specified for InjectedInfoAfterIndex") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(networkID, l1InfoTreeIndex) - } - if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(networkID, l1InfoTreeIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { - r1 = rf(networkID, l1InfoTreeIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BridgeClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' -type BridgeClientInterface_InjectedInfoAfterIndex_Call struct { - *mock.Call -} - -// InjectedInfoAfterIndex is a helper method to define mock.On call -// - networkID uint32 -// - l1InfoTreeIndex uint32 -func (_e *BridgeClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *BridgeClientInterface_InjectedInfoAfterIndex_Call { - return &BridgeClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} -} - -func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32)) - }) - return _c -} - -func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *BridgeClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *BridgeClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *BridgeClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Return(run) - return _c -} - -// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount -func (_m *BridgeClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) (uint32, error) { - ret := _m.Called(networkID, depositCount) - - if len(ret) == 0 { - panic("no return value specified for L1InfoTreeIndexForBridge") - } - - var r0 uint32 - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { - return rf(networkID, depositCount) - } - if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { - r0 = rf(networkID, depositCount) - } else { - r0 = ret.Get(0).(uint32) - } - - if rf, ok := 
ret.Get(1).(func(uint32, uint32) error); ok { - r1 = rf(networkID, depositCount) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BridgeClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' -type BridgeClientInterface_L1InfoTreeIndexForBridge_Call struct { - *mock.Call -} - -// L1InfoTreeIndexForBridge is a helper method to define mock.On call -// - networkID uint32 -// - depositCount uint32 -func (_e *BridgeClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { - return &BridgeClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} -} - -func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32)) - }) - return _c -} - -func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *BridgeClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *BridgeClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Return(run) - return _c -} - -// SponsorClaim provides a mock function with given fields: claim -func (_m *BridgeClientInterface) SponsorClaim(claim claimsponsor.Claim) error { - ret := _m.Called(claim) - - if len(ret) == 0 { - panic("no return value specified for SponsorClaim") - } - - var r0 error - if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { - r0 = rf(claim) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// BridgeClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'SponsorClaim' -type BridgeClientInterface_SponsorClaim_Call struct { - *mock.Call -} - -// SponsorClaim is a helper method to define mock.On call -// - claim claimsponsor.Claim -func (_e *BridgeClientInterface_Expecter) SponsorClaim(claim interface{}) *BridgeClientInterface_SponsorClaim_Call { - return &BridgeClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} -} - -func (_c *BridgeClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *BridgeClientInterface_SponsorClaim_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(claimsponsor.Claim)) - }) - return _c -} - -func (_c *BridgeClientInterface_SponsorClaim_Call) Return(_a0 error) *BridgeClientInterface_SponsorClaim_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *BridgeClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *BridgeClientInterface_SponsorClaim_Call { - _c.Call.Return(run) - return _c -} - -// NewBridgeClientInterface creates a new instance of BridgeClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewBridgeClientInterface(t interface { - mock.TestingT - Cleanup(func()) -}) *BridgeClientInterface { - mock := &BridgeClientInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/bridger.go b/rpc/mocks/bridger.go deleted file mode 100644 index d0344c294..000000000 --- a/rpc/mocks/bridger.go +++ /dev/null @@ -1,159 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/tree/types" -) - -// Bridger is an autogenerated mock type for the Bridger type -type Bridger struct { - mock.Mock -} - -type Bridger_Expecter struct { - mock *mock.Mock -} - -func (_m *Bridger) EXPECT() *Bridger_Expecter { - return &Bridger_Expecter{mock: &_m.Mock} -} - -// GetProof provides a mock function with given fields: ctx, depositCount, localExitRoot -func (_m *Bridger) GetProof(ctx context.Context, depositCount uint32, localExitRoot common.Hash) (types.Proof, error) { - ret := _m.Called(ctx, depositCount, localExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetProof") - } - - var r0 types.Proof - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { - return rf(ctx, depositCount, localExitRoot) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { - r0 = rf(ctx, depositCount, localExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Proof) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, depositCount, localExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Bridger_GetProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetProof' -type Bridger_GetProof_Call struct { - *mock.Call -} - -// GetProof is a helper method to define mock.On call -// - ctx context.Context -// - depositCount uint32 -// - localExitRoot common.Hash -func (_e *Bridger_Expecter) GetProof(ctx interface{}, depositCount interface{}, localExitRoot interface{}) *Bridger_GetProof_Call { - return &Bridger_GetProof_Call{Call: _e.mock.On("GetProof", ctx, depositCount, localExitRoot)} -} - -func (_c *Bridger_GetProof_Call) Run(run func(ctx context.Context, 
depositCount uint32, localExitRoot common.Hash)) *Bridger_GetProof_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *Bridger_GetProof_Call) Return(_a0 types.Proof, _a1 error) *Bridger_GetProof_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Bridger_GetProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *Bridger_GetProof_Call { - _c.Call.Return(run) - return _c -} - -// GetRootByLER provides a mock function with given fields: ctx, ler -func (_m *Bridger) GetRootByLER(ctx context.Context, ler common.Hash) (*types.Root, error) { - ret := _m.Called(ctx, ler) - - if len(ret) == 0 { - panic("no return value specified for GetRootByLER") - } - - var r0 *types.Root - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Root, error)); ok { - return rf(ctx, ler) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Root); ok { - r0 = rf(ctx, ler) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Root) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, ler) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Bridger_GetRootByLER_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRootByLER' -type Bridger_GetRootByLER_Call struct { - *mock.Call -} - -// GetRootByLER is a helper method to define mock.On call -// - ctx context.Context -// - ler common.Hash -func (_e *Bridger_Expecter) GetRootByLER(ctx interface{}, ler interface{}) *Bridger_GetRootByLER_Call { - return &Bridger_GetRootByLER_Call{Call: _e.mock.On("GetRootByLER", ctx, ler)} -} - -func (_c *Bridger_GetRootByLER_Call) Run(run func(ctx context.Context, ler common.Hash)) *Bridger_GetRootByLER_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) 
- return _c -} - -func (_c *Bridger_GetRootByLER_Call) Return(_a0 *types.Root, _a1 error) *Bridger_GetRootByLER_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Bridger_GetRootByLER_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Root, error)) *Bridger_GetRootByLER_Call { - _c.Call.Return(run) - return _c -} - -// NewBridger creates a new instance of Bridger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewBridger(t interface { - mock.TestingT - Cleanup(func()) -}) *Bridger { - mock := &Bridger{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/claim_sponsorer.go b/rpc/mocks/claim_sponsorer.go deleted file mode 100644 index 9a9ef9b5e..000000000 --- a/rpc/mocks/claim_sponsorer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - big "math/big" - - claimsponsor "github.com/0xPolygon/cdk/claimsponsor" - mock "github.com/stretchr/testify/mock" -) - -// ClaimSponsorer is an autogenerated mock type for the ClaimSponsorer type -type ClaimSponsorer struct { - mock.Mock -} - -type ClaimSponsorer_Expecter struct { - mock *mock.Mock -} - -func (_m *ClaimSponsorer) EXPECT() *ClaimSponsorer_Expecter { - return &ClaimSponsorer_Expecter{mock: &_m.Mock} -} - -// AddClaimToQueue provides a mock function with given fields: claim -func (_m *ClaimSponsorer) AddClaimToQueue(claim *claimsponsor.Claim) error { - ret := _m.Called(claim) - - if len(ret) == 0 { - panic("no return value specified for AddClaimToQueue") - } - - var r0 error - if rf, ok := ret.Get(0).(func(*claimsponsor.Claim) error); ok { - r0 = rf(claim) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ClaimSponsorer_AddClaimToQueue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddClaimToQueue' 
-type ClaimSponsorer_AddClaimToQueue_Call struct { - *mock.Call -} - -// AddClaimToQueue is a helper method to define mock.On call -// - claim *claimsponsor.Claim -func (_e *ClaimSponsorer_Expecter) AddClaimToQueue(claim interface{}) *ClaimSponsorer_AddClaimToQueue_Call { - return &ClaimSponsorer_AddClaimToQueue_Call{Call: _e.mock.On("AddClaimToQueue", claim)} -} - -func (_c *ClaimSponsorer_AddClaimToQueue_Call) Run(run func(claim *claimsponsor.Claim)) *ClaimSponsorer_AddClaimToQueue_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*claimsponsor.Claim)) - }) - return _c -} - -func (_c *ClaimSponsorer_AddClaimToQueue_Call) Return(_a0 error) *ClaimSponsorer_AddClaimToQueue_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ClaimSponsorer_AddClaimToQueue_Call) RunAndReturn(run func(*claimsponsor.Claim) error) *ClaimSponsorer_AddClaimToQueue_Call { - _c.Call.Return(run) - return _c -} - -// GetClaim provides a mock function with given fields: globalIndex -func (_m *ClaimSponsorer) GetClaim(globalIndex *big.Int) (*claimsponsor.Claim, error) { - ret := _m.Called(globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetClaim") - } - - var r0 *claimsponsor.Claim - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int) (*claimsponsor.Claim, error)); ok { - return rf(globalIndex) - } - if rf, ok := ret.Get(0).(func(*big.Int) *claimsponsor.Claim); ok { - r0 = rf(globalIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*claimsponsor.Claim) - } - } - - if rf, ok := ret.Get(1).(func(*big.Int) error); ok { - r1 = rf(globalIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClaimSponsorer_GetClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetClaim' -type ClaimSponsorer_GetClaim_Call struct { - *mock.Call -} - -// GetClaim is a helper method to define mock.On call -// - globalIndex *big.Int -func (_e *ClaimSponsorer_Expecter) GetClaim(globalIndex interface{}) 
*ClaimSponsorer_GetClaim_Call { - return &ClaimSponsorer_GetClaim_Call{Call: _e.mock.On("GetClaim", globalIndex)} -} - -func (_c *ClaimSponsorer_GetClaim_Call) Run(run func(globalIndex *big.Int)) *ClaimSponsorer_GetClaim_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*big.Int)) - }) - return _c -} - -func (_c *ClaimSponsorer_GetClaim_Call) Return(_a0 *claimsponsor.Claim, _a1 error) *ClaimSponsorer_GetClaim_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ClaimSponsorer_GetClaim_Call) RunAndReturn(run func(*big.Int) (*claimsponsor.Claim, error)) *ClaimSponsorer_GetClaim_Call { - _c.Call.Return(run) - return _c -} - -// NewClaimSponsorer creates a new instance of ClaimSponsorer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClaimSponsorer(t interface { - mock.TestingT - Cleanup(func()) -}) *ClaimSponsorer { - mock := &ClaimSponsorer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/client_factory_interface.go b/rpc/mocks/client_factory_interface.go deleted file mode 100644 index aca7aed02..000000000 --- a/rpc/mocks/client_factory_interface.go +++ /dev/null @@ -1,83 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - rpc "github.com/0xPolygon/cdk/rpc/client" - mock "github.com/stretchr/testify/mock" -) - -// ClientFactoryInterface is an autogenerated mock type for the ClientFactoryInterface type -type ClientFactoryInterface struct { - mock.Mock -} - -type ClientFactoryInterface_Expecter struct { - mock *mock.Mock -} - -func (_m *ClientFactoryInterface) EXPECT() *ClientFactoryInterface_Expecter { - return &ClientFactoryInterface_Expecter{mock: &_m.Mock} -} - -// NewClient provides a mock function with given fields: url -func (_m *ClientFactoryInterface) NewClient(url string) rpc.ClientInterface { - ret := _m.Called(url) - - if len(ret) == 0 { - panic("no return value specified for NewClient") - } - - var r0 rpc.ClientInterface - if rf, ok := ret.Get(0).(func(string) rpc.ClientInterface); ok { - r0 = rf(url) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(rpc.ClientInterface) - } - } - - return r0 -} - -// ClientFactoryInterface_NewClient_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewClient' -type ClientFactoryInterface_NewClient_Call struct { - *mock.Call -} - -// NewClient is a helper method to define mock.On call -// - url string -func (_e *ClientFactoryInterface_Expecter) NewClient(url interface{}) *ClientFactoryInterface_NewClient_Call { - return &ClientFactoryInterface_NewClient_Call{Call: _e.mock.On("NewClient", url)} -} - -func (_c *ClientFactoryInterface_NewClient_Call) Run(run func(url string)) *ClientFactoryInterface_NewClient_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *ClientFactoryInterface_NewClient_Call) Return(_a0 rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ClientFactoryInterface_NewClient_Call) RunAndReturn(run func(string) rpc.ClientInterface) *ClientFactoryInterface_NewClient_Call { - _c.Call.Return(run) - return _c -} - -// 
NewClientFactoryInterface creates a new instance of ClientFactoryInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClientFactoryInterface(t interface { - mock.TestingT - Cleanup(func()) -}) *ClientFactoryInterface { - mock := &ClientFactoryInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/client_interface.go b/rpc/mocks/client_interface.go deleted file mode 100644 index 28b877755..000000000 --- a/rpc/mocks/client_interface.go +++ /dev/null @@ -1,319 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - big "math/big" - - claimsponsor "github.com/0xPolygon/cdk/claimsponsor" - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/rpc/types" -) - -// ClientInterface is an autogenerated mock type for the ClientInterface type -type ClientInterface struct { - mock.Mock -} - -type ClientInterface_Expecter struct { - mock *mock.Mock -} - -func (_m *ClientInterface) EXPECT() *ClientInterface_Expecter { - return &ClientInterface_Expecter{mock: &_m.Mock} -} - -// ClaimProof provides a mock function with given fields: networkID, depositCount, l1InfoTreeIndex -func (_m *ClientInterface) ClaimProof(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32) (*types.ClaimProof, error) { - ret := _m.Called(networkID, depositCount, l1InfoTreeIndex) - - if len(ret) == 0 { - panic("no return value specified for ClaimProof") - } - - var r0 *types.ClaimProof - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) (*types.ClaimProof, error)); ok { - return rf(networkID, depositCount, l1InfoTreeIndex) - } - if rf, ok := ret.Get(0).(func(uint32, uint32, uint32) *types.ClaimProof); ok { - r0 = rf(networkID, depositCount, l1InfoTreeIndex) - } else { - if 
ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ClaimProof) - } - } - - if rf, ok := ret.Get(1).(func(uint32, uint32, uint32) error); ok { - r1 = rf(networkID, depositCount, l1InfoTreeIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientInterface_ClaimProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClaimProof' -type ClientInterface_ClaimProof_Call struct { - *mock.Call -} - -// ClaimProof is a helper method to define mock.On call -// - networkID uint32 -// - depositCount uint32 -// - l1InfoTreeIndex uint32 -func (_e *ClientInterface_Expecter) ClaimProof(networkID interface{}, depositCount interface{}, l1InfoTreeIndex interface{}) *ClientInterface_ClaimProof_Call { - return &ClientInterface_ClaimProof_Call{Call: _e.mock.On("ClaimProof", networkID, depositCount, l1InfoTreeIndex)} -} - -func (_c *ClientInterface_ClaimProof_Call) Run(run func(networkID uint32, depositCount uint32, l1InfoTreeIndex uint32)) *ClientInterface_ClaimProof_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32), args[2].(uint32)) - }) - return _c -} - -func (_c *ClientInterface_ClaimProof_Call) Return(_a0 *types.ClaimProof, _a1 error) *ClientInterface_ClaimProof_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ClientInterface_ClaimProof_Call) RunAndReturn(run func(uint32, uint32, uint32) (*types.ClaimProof, error)) *ClientInterface_ClaimProof_Call { - _c.Call.Return(run) - return _c -} - -// GetSponsoredClaimStatus provides a mock function with given fields: globalIndex -func (_m *ClientInterface) GetSponsoredClaimStatus(globalIndex *big.Int) (claimsponsor.ClaimStatus, error) { - ret := _m.Called(globalIndex) - - if len(ret) == 0 { - panic("no return value specified for GetSponsoredClaimStatus") - } - - var r0 claimsponsor.ClaimStatus - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int) (claimsponsor.ClaimStatus, error)); ok { - return rf(globalIndex) - } - if rf, ok := 
ret.Get(0).(func(*big.Int) claimsponsor.ClaimStatus); ok { - r0 = rf(globalIndex) - } else { - r0 = ret.Get(0).(claimsponsor.ClaimStatus) - } - - if rf, ok := ret.Get(1).(func(*big.Int) error); ok { - r1 = rf(globalIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientInterface_GetSponsoredClaimStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSponsoredClaimStatus' -type ClientInterface_GetSponsoredClaimStatus_Call struct { - *mock.Call -} - -// GetSponsoredClaimStatus is a helper method to define mock.On call -// - globalIndex *big.Int -func (_e *ClientInterface_Expecter) GetSponsoredClaimStatus(globalIndex interface{}) *ClientInterface_GetSponsoredClaimStatus_Call { - return &ClientInterface_GetSponsoredClaimStatus_Call{Call: _e.mock.On("GetSponsoredClaimStatus", globalIndex)} -} - -func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Run(run func(globalIndex *big.Int)) *ClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(*big.Int)) - }) - return _c -} - -func (_c *ClientInterface_GetSponsoredClaimStatus_Call) Return(_a0 claimsponsor.ClaimStatus, _a1 error) *ClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ClientInterface_GetSponsoredClaimStatus_Call) RunAndReturn(run func(*big.Int) (claimsponsor.ClaimStatus, error)) *ClientInterface_GetSponsoredClaimStatus_Call { - _c.Call.Return(run) - return _c -} - -// InjectedInfoAfterIndex provides a mock function with given fields: networkID, l1InfoTreeIndex -func (_m *ClientInterface) InjectedInfoAfterIndex(networkID uint32, l1InfoTreeIndex uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(networkID, l1InfoTreeIndex) - - if len(ret) == 0 { - panic("no return value specified for InjectedInfoAfterIndex") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32) 
(*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(networkID, l1InfoTreeIndex) - } - if rf, ok := ret.Get(0).(func(uint32, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(networkID, l1InfoTreeIndex) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { - r1 = rf(networkID, l1InfoTreeIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientInterface_InjectedInfoAfterIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InjectedInfoAfterIndex' -type ClientInterface_InjectedInfoAfterIndex_Call struct { - *mock.Call -} - -// InjectedInfoAfterIndex is a helper method to define mock.On call -// - networkID uint32 -// - l1InfoTreeIndex uint32 -func (_e *ClientInterface_Expecter) InjectedInfoAfterIndex(networkID interface{}, l1InfoTreeIndex interface{}) *ClientInterface_InjectedInfoAfterIndex_Call { - return &ClientInterface_InjectedInfoAfterIndex_Call{Call: _e.mock.On("InjectedInfoAfterIndex", networkID, l1InfoTreeIndex)} -} - -func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Run(run func(networkID uint32, l1InfoTreeIndex uint32)) *ClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32)) - }) - return _c -} - -func (_c *ClientInterface_InjectedInfoAfterIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *ClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ClientInterface_InjectedInfoAfterIndex_Call) RunAndReturn(run func(uint32, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *ClientInterface_InjectedInfoAfterIndex_Call { - _c.Call.Return(run) - return _c -} - -// L1InfoTreeIndexForBridge provides a mock function with given fields: networkID, depositCount -func (_m *ClientInterface) L1InfoTreeIndexForBridge(networkID uint32, depositCount uint32) 
(uint32, error) { - ret := _m.Called(networkID, depositCount) - - if len(ret) == 0 { - panic("no return value specified for L1InfoTreeIndexForBridge") - } - - var r0 uint32 - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint32) (uint32, error)); ok { - return rf(networkID, depositCount) - } - if rf, ok := ret.Get(0).(func(uint32, uint32) uint32); ok { - r0 = rf(networkID, depositCount) - } else { - r0 = ret.Get(0).(uint32) - } - - if rf, ok := ret.Get(1).(func(uint32, uint32) error); ok { - r1 = rf(networkID, depositCount) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ClientInterface_L1InfoTreeIndexForBridge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'L1InfoTreeIndexForBridge' -type ClientInterface_L1InfoTreeIndexForBridge_Call struct { - *mock.Call -} - -// L1InfoTreeIndexForBridge is a helper method to define mock.On call -// - networkID uint32 -// - depositCount uint32 -func (_e *ClientInterface_Expecter) L1InfoTreeIndexForBridge(networkID interface{}, depositCount interface{}) *ClientInterface_L1InfoTreeIndexForBridge_Call { - return &ClientInterface_L1InfoTreeIndexForBridge_Call{Call: _e.mock.On("L1InfoTreeIndexForBridge", networkID, depositCount)} -} - -func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Run(run func(networkID uint32, depositCount uint32)) *ClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint32)) - }) - return _c -} - -func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) Return(_a0 uint32, _a1 error) *ClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ClientInterface_L1InfoTreeIndexForBridge_Call) RunAndReturn(run func(uint32, uint32) (uint32, error)) *ClientInterface_L1InfoTreeIndexForBridge_Call { - _c.Call.Return(run) - return _c -} - -// SponsorClaim provides a mock function with given fields: claim -func (_m *ClientInterface) 
SponsorClaim(claim claimsponsor.Claim) error { - ret := _m.Called(claim) - - if len(ret) == 0 { - panic("no return value specified for SponsorClaim") - } - - var r0 error - if rf, ok := ret.Get(0).(func(claimsponsor.Claim) error); ok { - r0 = rf(claim) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ClientInterface_SponsorClaim_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SponsorClaim' -type ClientInterface_SponsorClaim_Call struct { - *mock.Call -} - -// SponsorClaim is a helper method to define mock.On call -// - claim claimsponsor.Claim -func (_e *ClientInterface_Expecter) SponsorClaim(claim interface{}) *ClientInterface_SponsorClaim_Call { - return &ClientInterface_SponsorClaim_Call{Call: _e.mock.On("SponsorClaim", claim)} -} - -func (_c *ClientInterface_SponsorClaim_Call) Run(run func(claim claimsponsor.Claim)) *ClientInterface_SponsorClaim_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(claimsponsor.Claim)) - }) - return _c -} - -func (_c *ClientInterface_SponsorClaim_Call) Return(_a0 error) *ClientInterface_SponsorClaim_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ClientInterface_SponsorClaim_Call) RunAndReturn(run func(claimsponsor.Claim) error) *ClientInterface_SponsorClaim_Call { - _c.Call.Return(run) - return _c -} - -// NewClientInterface creates a new instance of ClientInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClientInterface(t interface { - mock.TestingT - Cleanup(func()) -}) *ClientInterface { - mock := &ClientInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/l1_info_treer.go b/rpc/mocks/l1_info_treer.go deleted file mode 100644 index a4e0f66c5..000000000 --- a/rpc/mocks/l1_info_treer.go +++ /dev/null @@ -1,626 +0,0 @@ -// Code generated by mockery. 
DO NOT EDIT. - -package mocks - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" - - mock "github.com/stretchr/testify/mock" - - types "github.com/0xPolygon/cdk/tree/types" -) - -// L1InfoTreer is an autogenerated mock type for the L1InfoTreer type -type L1InfoTreer struct { - mock.Mock -} - -type L1InfoTreer_Expecter struct { - mock *mock.Mock -} - -func (_m *L1InfoTreer) EXPECT() *L1InfoTreer_Expecter { - return &L1InfoTreer_Expecter{mock: &_m.Mock} -} - -// GetFirstInfo provides a mock function with given fields: -func (_m *L1InfoTreer) GetFirstInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetFirstInfo") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetFirstInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfo' -type L1InfoTreer_GetFirstInfo_Call struct { - *mock.Call -} - -// GetFirstInfo is a helper method to define mock.On call -func (_e *L1InfoTreer_Expecter) GetFirstInfo() *L1InfoTreer_GetFirstInfo_Call { - return &L1InfoTreer_GetFirstInfo_Call{Call: _e.mock.On("GetFirstInfo")} -} - -func (_c *L1InfoTreer_GetFirstInfo_Call) Run(run func()) *L1InfoTreer_GetFirstInfo_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *L1InfoTreer_GetFirstInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfo_Call { - _c.Call.Return(_a0, _a1) - return 
_c -} - -func (_c *L1InfoTreer_GetFirstInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfo_Call { - _c.Call.Return(run) - return _c -} - -// GetFirstInfoAfterBlock provides a mock function with given fields: blockNum -func (_m *L1InfoTreer) GetFirstInfoAfterBlock(blockNum uint64) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(blockNum) - - if len(ret) == 0 { - panic("no return value specified for GetFirstInfoAfterBlock") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(blockNum) - } - if rf, ok := ret.Get(0).(func(uint64) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(blockNum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(uint64) error); ok { - r1 = rf(blockNum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetFirstInfoAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstInfoAfterBlock' -type L1InfoTreer_GetFirstInfoAfterBlock_Call struct { - *mock.Call -} - -// GetFirstInfoAfterBlock is a helper method to define mock.On call -// - blockNum uint64 -func (_e *L1InfoTreer_Expecter) GetFirstInfoAfterBlock(blockNum interface{}) *L1InfoTreer_GetFirstInfoAfterBlock_Call { - return &L1InfoTreer_GetFirstInfoAfterBlock_Call{Call: _e.mock.On("GetFirstInfoAfterBlock", blockNum)} -} - -func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Run(run func(blockNum uint64)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint64)) - }) - return _c -} - -func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstInfoAfterBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetFirstInfoAfterBlock_Call) 
RunAndReturn(run func(uint64) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstInfoAfterBlock_Call { - _c.Call.Return(run) - return _c -} - -// GetFirstL1InfoWithRollupExitRoot provides a mock function with given fields: rollupExitRoot -func (_m *L1InfoTreer) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(rollupExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetFirstL1InfoWithRollupExitRoot") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(rollupExitRoot) - } - if rf, ok := ret.Get(0).(func(common.Hash) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(rollupExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(common.Hash) error); ok { - r1 = rf(rollupExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstL1InfoWithRollupExitRoot' -type L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call struct { - *mock.Call -} - -// GetFirstL1InfoWithRollupExitRoot is a helper method to define mock.On call -// - rollupExitRoot common.Hash -func (_e *L1InfoTreer_Expecter) GetFirstL1InfoWithRollupExitRoot(rollupExitRoot interface{}) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { - return &L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call{Call: _e.mock.On("GetFirstL1InfoWithRollupExitRoot", rollupExitRoot)} -} - -func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Run(run func(rollupExitRoot common.Hash)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(common.Hash)) - }) - return _c -} - -func (_c 
*L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call) RunAndReturn(run func(common.Hash) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetFirstL1InfoWithRollupExitRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetFirstVerifiedBatches provides a mock function with given fields: rollupID -func (_m *L1InfoTreer) GetFirstVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { - ret := _m.Called(rollupID) - - if len(ret) == 0 { - panic("no return value specified for GetFirstVerifiedBatches") - } - - var r0 *l1infotreesync.VerifyBatches - var r1 error - if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { - return rf(rollupID) - } - if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { - r0 = rf(rollupID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) - } - } - - if rf, ok := ret.Get(1).(func(uint32) error); ok { - r1 = rf(rollupID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetFirstVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatches' -type L1InfoTreer_GetFirstVerifiedBatches_Call struct { - *mock.Call -} - -// GetFirstVerifiedBatches is a helper method to define mock.On call -// - rollupID uint32 -func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetFirstVerifiedBatches_Call { - return &L1InfoTreer_GetFirstVerifiedBatches_Call{Call: _e.mock.On("GetFirstVerifiedBatches", rollupID)} -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetFirstVerifiedBatches_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32)) - }) - return 
_c -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatches_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatches_Call { - _c.Call.Return(run) - return _c -} - -// GetFirstVerifiedBatchesAfterBlock provides a mock function with given fields: rollupID, blockNum -func (_m *L1InfoTreer) GetFirstVerifiedBatchesAfterBlock(rollupID uint32, blockNum uint64) (*l1infotreesync.VerifyBatches, error) { - ret := _m.Called(rollupID, blockNum) - - if len(ret) == 0 { - panic("no return value specified for GetFirstVerifiedBatchesAfterBlock") - } - - var r0 *l1infotreesync.VerifyBatches - var r1 error - if rf, ok := ret.Get(0).(func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)); ok { - return rf(rollupID, blockNum) - } - if rf, ok := ret.Get(0).(func(uint32, uint64) *l1infotreesync.VerifyBatches); ok { - r0 = rf(rollupID, blockNum) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) - } - } - - if rf, ok := ret.Get(1).(func(uint32, uint64) error); ok { - r1 = rf(rollupID, blockNum) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstVerifiedBatchesAfterBlock' -type L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call struct { - *mock.Call -} - -// GetFirstVerifiedBatchesAfterBlock is a helper method to define mock.On call -// - rollupID uint32 -// - blockNum uint64 -func (_e *L1InfoTreer_Expecter) GetFirstVerifiedBatchesAfterBlock(rollupID interface{}, blockNum interface{}) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { - return &L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call{Call: _e.mock.On("GetFirstVerifiedBatchesAfterBlock", 
rollupID, blockNum)} -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Run(run func(rollupID uint32, blockNum uint64)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32), args[1].(uint64)) - }) - return _c -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) Return(_a0 *l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call) RunAndReturn(run func(uint32, uint64) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetFirstVerifiedBatchesAfterBlock_Call { - _c.Call.Return(run) - return _c -} - -// GetInfoByIndex provides a mock function with given fields: ctx, index -func (_m *L1InfoTreer) GetInfoByIndex(ctx context.Context, index uint32) (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called(ctx, index) - - if len(ret) == 0 { - panic("no return value specified for GetInfoByIndex") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf(ctx, index) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf(ctx, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetInfoByIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInfoByIndex' -type L1InfoTreer_GetInfoByIndex_Call struct { - *mock.Call -} - -// GetInfoByIndex is a helper method to define mock.On call -// - ctx context.Context -// - index uint32 -func (_e *L1InfoTreer_Expecter) GetInfoByIndex(ctx interface{}, index interface{}) 
*L1InfoTreer_GetInfoByIndex_Call { - return &L1InfoTreer_GetInfoByIndex_Call{Call: _e.mock.On("GetInfoByIndex", ctx, index)} -} - -func (_c *L1InfoTreer_GetInfoByIndex_Call) Run(run func(ctx context.Context, index uint32)) *L1InfoTreer_GetInfoByIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *L1InfoTreer_GetInfoByIndex_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetInfoByIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetInfoByIndex_Call) RunAndReturn(run func(context.Context, uint32) (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetInfoByIndex_Call { - _c.Call.Return(run) - return _c -} - -// GetLastInfo provides a mock function with given fields: -func (_m *L1InfoTreer) GetLastInfo() (*l1infotreesync.L1InfoTreeLeaf, error) { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for GetLastInfo") - } - - var r0 *l1infotreesync.L1InfoTreeLeaf - var r1 error - if rf, ok := ret.Get(0).(func() (*l1infotreesync.L1InfoTreeLeaf, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() *l1infotreesync.L1InfoTreeLeaf); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.L1InfoTreeLeaf) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetLastInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastInfo' -type L1InfoTreer_GetLastInfo_Call struct { - *mock.Call -} - -// GetLastInfo is a helper method to define mock.On call -func (_e *L1InfoTreer_Expecter) GetLastInfo() *L1InfoTreer_GetLastInfo_Call { - return &L1InfoTreer_GetLastInfo_Call{Call: _e.mock.On("GetLastInfo")} -} - -func (_c *L1InfoTreer_GetLastInfo_Call) Run(run func()) *L1InfoTreer_GetLastInfo_Call { - _c.Call.Run(func(args mock.Arguments) { - run() 
- }) - return _c -} - -func (_c *L1InfoTreer_GetLastInfo_Call) Return(_a0 *l1infotreesync.L1InfoTreeLeaf, _a1 error) *L1InfoTreer_GetLastInfo_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetLastInfo_Call) RunAndReturn(run func() (*l1infotreesync.L1InfoTreeLeaf, error)) *L1InfoTreer_GetLastInfo_Call { - _c.Call.Return(run) - return _c -} - -// GetLastVerifiedBatches provides a mock function with given fields: rollupID -func (_m *L1InfoTreer) GetLastVerifiedBatches(rollupID uint32) (*l1infotreesync.VerifyBatches, error) { - ret := _m.Called(rollupID) - - if len(ret) == 0 { - panic("no return value specified for GetLastVerifiedBatches") - } - - var r0 *l1infotreesync.VerifyBatches - var r1 error - if rf, ok := ret.Get(0).(func(uint32) (*l1infotreesync.VerifyBatches, error)); ok { - return rf(rollupID) - } - if rf, ok := ret.Get(0).(func(uint32) *l1infotreesync.VerifyBatches); ok { - r0 = rf(rollupID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*l1infotreesync.VerifyBatches) - } - } - - if rf, ok := ret.Get(1).(func(uint32) error); ok { - r1 = rf(rollupID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetLastVerifiedBatches_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastVerifiedBatches' -type L1InfoTreer_GetLastVerifiedBatches_Call struct { - *mock.Call -} - -// GetLastVerifiedBatches is a helper method to define mock.On call -// - rollupID uint32 -func (_e *L1InfoTreer_Expecter) GetLastVerifiedBatches(rollupID interface{}) *L1InfoTreer_GetLastVerifiedBatches_Call { - return &L1InfoTreer_GetLastVerifiedBatches_Call{Call: _e.mock.On("GetLastVerifiedBatches", rollupID)} -} - -func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Run(run func(rollupID uint32)) *L1InfoTreer_GetLastVerifiedBatches_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(uint32)) - }) - return _c -} - -func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) Return(_a0 
*l1infotreesync.VerifyBatches, _a1 error) *L1InfoTreer_GetLastVerifiedBatches_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetLastVerifiedBatches_Call) RunAndReturn(run func(uint32) (*l1infotreesync.VerifyBatches, error)) *L1InfoTreer_GetLastVerifiedBatches_Call { - _c.Call.Return(run) - return _c -} - -// GetLocalExitRoot provides a mock function with given fields: ctx, networkID, rollupExitRoot -func (_m *L1InfoTreer) GetLocalExitRoot(ctx context.Context, networkID uint32, rollupExitRoot common.Hash) (common.Hash, error) { - ret := _m.Called(ctx, networkID, rollupExitRoot) - - if len(ret) == 0 { - panic("no return value specified for GetLocalExitRoot") - } - - var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (common.Hash, error)); ok { - return rf(ctx, networkID, rollupExitRoot) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) common.Hash); ok { - r0 = rf(ctx, networkID, rollupExitRoot) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.Hash) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, networkID, rollupExitRoot) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetLocalExitRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLocalExitRoot' -type L1InfoTreer_GetLocalExitRoot_Call struct { - *mock.Call -} - -// GetLocalExitRoot is a helper method to define mock.On call -// - ctx context.Context -// - networkID uint32 -// - rollupExitRoot common.Hash -func (_e *L1InfoTreer_Expecter) GetLocalExitRoot(ctx interface{}, networkID interface{}, rollupExitRoot interface{}) *L1InfoTreer_GetLocalExitRoot_Call { - return &L1InfoTreer_GetLocalExitRoot_Call{Call: _e.mock.On("GetLocalExitRoot", ctx, networkID, rollupExitRoot)} -} - -func (_c *L1InfoTreer_GetLocalExitRoot_Call) Run(run func(ctx context.Context, networkID uint32, 
rollupExitRoot common.Hash)) *L1InfoTreer_GetLocalExitRoot_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreer_GetLocalExitRoot_Call) Return(_a0 common.Hash, _a1 error) *L1InfoTreer_GetLocalExitRoot_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetLocalExitRoot_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (common.Hash, error)) *L1InfoTreer_GetLocalExitRoot_Call { - _c.Call.Return(run) - return _c -} - -// GetRollupExitTreeMerkleProof provides a mock function with given fields: ctx, networkID, root -func (_m *L1InfoTreer) GetRollupExitTreeMerkleProof(ctx context.Context, networkID uint32, root common.Hash) (types.Proof, error) { - ret := _m.Called(ctx, networkID, root) - - if len(ret) == 0 { - panic("no return value specified for GetRollupExitTreeMerkleProof") - } - - var r0 types.Proof - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) (types.Proof, error)); ok { - return rf(ctx, networkID, root) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32, common.Hash) types.Proof); ok { - r0 = rf(ctx, networkID, root) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.Proof) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32, common.Hash) error); ok { - r1 = rf(ctx, networkID, root) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L1InfoTreer_GetRollupExitTreeMerkleProof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupExitTreeMerkleProof' -type L1InfoTreer_GetRollupExitTreeMerkleProof_Call struct { - *mock.Call -} - -// GetRollupExitTreeMerkleProof is a helper method to define mock.On call -// - ctx context.Context -// - networkID uint32 -// - root common.Hash -func (_e *L1InfoTreer_Expecter) GetRollupExitTreeMerkleProof(ctx interface{}, networkID interface{}, root interface{}) 
*L1InfoTreer_GetRollupExitTreeMerkleProof_Call { - return &L1InfoTreer_GetRollupExitTreeMerkleProof_Call{Call: _e.mock.On("GetRollupExitTreeMerkleProof", ctx, networkID, root)} -} - -func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Run(run func(ctx context.Context, networkID uint32, root common.Hash)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32), args[2].(common.Hash)) - }) - return _c -} - -func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) Return(_a0 types.Proof, _a1 error) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L1InfoTreer_GetRollupExitTreeMerkleProof_Call) RunAndReturn(run func(context.Context, uint32, common.Hash) (types.Proof, error)) *L1InfoTreer_GetRollupExitTreeMerkleProof_Call { - _c.Call.Return(run) - return _c -} - -// NewL1InfoTreer creates a new instance of L1InfoTreer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL1InfoTreer(t interface { - mock.TestingT - Cleanup(func()) -}) *L1InfoTreer { - mock := &L1InfoTreer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/mocks/last_ge_rer.go b/rpc/mocks/last_ge_rer.go deleted file mode 100644 index 7b338e2e7..000000000 --- a/rpc/mocks/last_ge_rer.go +++ /dev/null @@ -1,94 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - lastgersync "github.com/0xPolygon/cdk/lastgersync" - mock "github.com/stretchr/testify/mock" -) - -// LastGERer is an autogenerated mock type for the LastGERer type -type LastGERer struct { - mock.Mock -} - -type LastGERer_Expecter struct { - mock *mock.Mock -} - -func (_m *LastGERer) EXPECT() *LastGERer_Expecter { - return &LastGERer_Expecter{mock: &_m.Mock} -} - -// GetFirstGERAfterL1InfoTreeIndex provides a mock function with given fields: ctx, atOrAfterL1InfoTreeIndex -func (_m *LastGERer) GetFirstGERAfterL1InfoTreeIndex(ctx context.Context, atOrAfterL1InfoTreeIndex uint32) (lastgersync.Event, error) { - ret := _m.Called(ctx, atOrAfterL1InfoTreeIndex) - - if len(ret) == 0 { - panic("no return value specified for GetFirstGERAfterL1InfoTreeIndex") - } - - var r0 lastgersync.Event - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint32) (lastgersync.Event, error)); ok { - return rf(ctx, atOrAfterL1InfoTreeIndex) - } - if rf, ok := ret.Get(0).(func(context.Context, uint32) lastgersync.Event); ok { - r0 = rf(ctx, atOrAfterL1InfoTreeIndex) - } else { - r0 = ret.Get(0).(lastgersync.Event) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { - r1 = rf(ctx, atOrAfterL1InfoTreeIndex) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstGERAfterL1InfoTreeIndex' -type LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call struct { - *mock.Call -} - -// GetFirstGERAfterL1InfoTreeIndex is a helper method to define mock.On call -// - ctx context.Context -// - atOrAfterL1InfoTreeIndex uint32 -func (_e *LastGERer_Expecter) GetFirstGERAfterL1InfoTreeIndex(ctx interface{}, atOrAfterL1InfoTreeIndex interface{}) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - return &LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call{Call: 
_e.mock.On("GetFirstGERAfterL1InfoTreeIndex", ctx, atOrAfterL1InfoTreeIndex)} -} - -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Run(run func(ctx context.Context, atOrAfterL1InfoTreeIndex uint32)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint32)) - }) - return _c -} - -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) Return(_a0 lastgersync.Event, _a1 error) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call) RunAndReturn(run func(context.Context, uint32) (lastgersync.Event, error)) *LastGERer_GetFirstGERAfterL1InfoTreeIndex_Call { - _c.Call.Return(run) - return _c -} - -// NewLastGERer creates a new instance of LastGERer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewLastGERer(t interface { - mock.TestingT - Cleanup(func()) -}) *LastGERer { - mock := &LastGERer{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/rpc/openrpc.json b/rpc/openrpc.json deleted file mode 100644 index 4e3a2518e..000000000 --- a/rpc/openrpc.json +++ /dev/null @@ -1,386 +0,0 @@ -{ - "openrpc": "1.0.0", - "info": { - "title": "CDK Endpoints", - "version": "0.0.1" - }, - "methods": [ - { - "name": "bridge_l1InfoTreeIndexForBridge", - "summary": "Returns the first L1 Info Tree index in which the bridge was included. NetworkID represents the origin network. 
This call needs to be done to a client of the same network were the bridge tx was sent", - "params": [ - { - "$ref": "#/components/contentDescriptors/NetworkID" - }, - { - "$ref": "#/components/contentDescriptors/DepositCount" - } - ], - "result": { - "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" - }, - "examples": [ - { - "name": "example", - "params": [], - "result": { - "name": "exampleResult", - "value": "0x1" - } - } - ] - }, - { - "name": "bridge_injectedInfoAfterIndex", - "summary": "Return the first GER injected onto the network that is linked to the given index or greater. This call is useful to understand when a bridge is ready to be claimed on its destination network", - "params": [ - { - "$ref": "#/components/contentDescriptors/NetworkID" - }, - { - "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" - } - ], - "result": { - "$ref": "#/components/contentDescriptors/L1InfoTreeLeaf" - }, - "examples": [ ] - }, - { - "name": "bridge_getProof", - "summary": "Gets the proof needed to perform a claim for a given bridge", - "params": [ - { - "$ref": "#/components/contentDescriptors/NetworkID" - }, - { - "$ref": "#/components/contentDescriptors/DepositCount" - }, - { - "$ref": "#/components/contentDescriptors/L1InfoTreeIndex" - } - ], - "result": { - "$ref": "#/components/contentDescriptors/Proof" - }, - "examples": [] - }, - { - "name": "bridge_sponsorClaim", - "summary": "Request to sponsor the claim tx for a given bridge", - "params": [ - { - "$ref": "#/components/contentDescriptors/SponsorClaim" - } - ], - "result": { - "name": "empty", - "schema": {"type": "null"} - }, - "examples": [] - }, - { - "name": "bridge_getSponsoredClaimStatus", - "summary": "Gets the proof needed to perform a claim for a given bridge", - "params": [ - { - "$ref": "#/components/contentDescriptors/GlobalIndex" - } - ], - "result": { - "$ref": "#/components/contentDescriptors/ClaimStatus" - }, - "examples": [] - } - ], - "components": { - "contentDescriptors": { - 
"NetworkID": { - "name": "networkID", - "required": true, - "schema": { - "$ref": "#/components/schemas/NetworkID" - } - }, - "DepositCount": { - "name": "depositCount", - "required": true, - "schema": { - "$ref": "#/components/schemas/DepositCount" - } - }, - "L1InfoTreeIndex": { - "name": "l1InfoTreeIndex", - "required": true, - "schema": { - "$ref": "#/components/schemas/L1InfoTreeIndex" - } - }, - "L1InfoTreeLeaf": { - "name": "l1InfoTreeLeaf", - "required": true, - "schema": { - "$ref": "#/components/schemas/L1InfoTreeLeaf" - } - }, - "Proof": { - "name": "proof", - "required": true, - "schema": { - "$ref": "#/components/schemas/Proof" - } - }, - "SponsorClaim": { - "name": "sponsorClaim", - "required": true, - "schema": { - "$ref": "#/components/schemas/SponsorClaim" - } - }, - "GlobalIndex": { - "name": "globalIndex", - "required": true, - "schema": { - "$ref": "#/components/schemas/GlobalIndex" - } - }, - "ClaimStatus": { - "name": "claimStatus", - "required": true, - "schema": { - "$ref": "#/components/schemas/ClaimStatus" - } - } - }, - "schemas": { - "Bytes": { - "title": "bytes", - "type": "string", - "description": "Hex representation of a variable length byte array", - "pattern": "^0x([a-fA-F0-9]?)+$" - }, - "Integer": { - "title": "integer", - "type": "string", - "pattern": "^0x[a-fA-F0-9]+$", - "description": "Hex representation of the integer" - }, - "Keccak": { - "title": "keccak", - "type": "string", - "description": "Hex representation of a Keccak 256 hash", - "pattern": "^0x[a-fA-F\\d]{64}$" - }, - "Address": { - "title": "address", - "type": "string", - "pattern": "^0x[a-fA-F\\d]{40}$" - }, - "BlockHash": { - "title": "blockHash", - "type": "string", - "pattern": "^0x[a-fA-F\\d]{64}$", - "description": "The hex representation of the Keccak 256 of the RLP encoded block" - }, - "BlockNumber": { - "title": "blockNumber", - "type": "string", - "description": "The hex representation of the block's height", - "$ref": "#/components/schemas/Integer" - 
}, - "BlockPosition": { - "title": "blockPosition", - "type": "string", - "description": "The hex representation of the position inside the block", - "$ref": "#/components/schemas/Integer" - }, - "NetworkID": { - "title": "networkID", - "type": "string", - "description": "The hex representation of the network ID", - "$ref": "#/components/schemas/Integer" - }, - "DepositCount": { - "title": "depositCount", - "type": "string", - "description": "The hex representation of the deposit count", - "$ref": "#/components/schemas/Integer" - }, - "L1InfoTreeIndex": { - "title": "l1InfoTreeIndex", - "type": "string", - "description": "The hex representation of the L1 info tree index", - "$ref": "#/components/schemas/Integer" - }, - "L1InfoTreeLeaf": { - "title": "l1InfoTreeLeaf", - "type": "object", - "readOnly": true, - "properties": { - "blockNumber": { - "$ref": "#/components/schemas/BlockNumber" - }, - "blockPosition": { - "$ref": "#/components/schemas/BlockPosition" - }, - "previousBlockHash": { - "$ref": "#/components/schemas/Keccak" - }, - "timestamp": { - "title": "blockTimeStamp", - "type": "string", - "description": "The unix timestamp for when the block was collated" - }, - "l1InfoTreeIndex": { - "$ref": "#/components/schemas/L1InfoTreeIndex" - }, - "mainnetExitRoot": { - "$ref": "#/components/schemas/Keccak" - }, - "rollupExitRoot": { - "$ref": "#/components/schemas/Keccak" - }, - "globalExitRoot": { - "$ref": "#/components/schemas/Keccak" - }, - "hash": { - "$ref": "#/components/schemas/Keccak" - } - } - }, - "MerkleProof": { - "title": "merkleProof", - "type": "array", - "description": "Array of hashes that constitute a merkle proof", - "items": { - "$ref": "#/components/schemas/Keccak" - } - }, - "ProofLocalExitRoot": { - "title": "proofLocalExitRoot", - "description": "Merkle Proof that proofs the existance of a deposit in the local exit tree of a network", - "$ref": "#/components/schemas/MerkleProof" - }, - "ProofRollupExitRoot": { - "title": 
"proofLocalExitRoot", - "description": "Merkle Proof that proofs the existance of a deposit in the local exit tree of a network", - "$ref": "#/components/schemas/MerkleProof" - }, - "Proof": { - "title": "proof", - "type": "object", - "readOnly": true, - "properties": { - "l1InfoTreeLeaf": { - "$ref": "#/components/schemas/L1InfoTreeLeaf" - }, - "proofLocalExitRoot": { - "$ref": "#/components/schemas/ProofLocalExitRoot" - }, - "proofRollupExitRoot": { - "$ref": "#/components/schemas/ProofRollupExitRoot" - } - } - }, - "LeafType": { - "title": "leafType", - "type": "string", - "description": "The hex representation of the leaf type", - "$ref": "#/components/schemas/Integer" - }, - "GlobalIndex": { - "title": "globalIndex", - "type": "string", - "description": "The hex representation of the global index", - "$ref": "#/components/schemas/Integer" - }, - "OriginNetwork": { - "title": "originNetwork", - "type": "string", - "description": "The hex representation of the origin network ID of the token", - "$ref": "#/components/schemas/Integer" - }, - "OriginTokenAddress": { - "title": "originTokenAddress", - "type": "string", - "description": "address of the token on it's origin network", - "$ref": "#/components/schemas/Address" - }, - "DestinationNetwork": { - "title": "destinationNetwork", - "type": "string", - "description": "The hex representation of the destination network ID", - "$ref": "#/components/schemas/Integer" - }, - "DestinationAddress": { - "title": "destinationAddress", - "type": "string", - "description": "address of the receiver of the bridge", - "$ref": "#/components/schemas/Address" - }, - "Amount": { - "title": "amount", - "description": "Amount of tokens being bridged", - "$ref": "#/components/schemas/Keccak" - }, - "Metadata": { - "title": "metadata", - "description": "Extra data included in the bridge", - "$ref": "#/components/schemas/Bytes" - }, - "SponsorClaim": { - "title": "sponsorClaim", - "type": "object", - "readOnly": true, - "properties": { 
- "leafType": { - "$ref": "#/components/schemas/LeafType" - }, - "proofLocalExitRoot": { - "$ref": "#/components/schemas/ProofLocalExitRoot" - }, - "proofRollupExitRoot": { - "$ref": "#/components/schemas/ProofRollupExitRoot" - }, - "globalIndex": { - "$ref": "#/components/schemas/GlobalIndex" - }, - "mainnetExitRoot": { - "$ref": "#/components/schemas/Keccak" - }, - "rollupExitRoot": { - "$ref": "#/components/schemas/Keccak" - }, - "originNetwork": { - "$ref": "#/components/schemas/OriginNetwork" - }, - "originTokenAddress": { - "$ref": "#/components/schemas/OriginTokenAddress" - }, - "destinationNetwork": { - "$ref": "#/components/schemas/DestinationNetwork" - }, - "destinationAddress": { - "$ref": "#/components/schemas/DestinationAddress" - }, - "amount": { - "$ref": "#/components/schemas/Amount" - }, - "metadata": { - "$ref": "#/components/schemas/Metadata" - } - } - }, - "ClaimStatus": { - "title": "claimStatus", - "description": "The status of a claim", - "type": "string", - "enum": [ - "pending", - "failed", - "success" - ] - } - } - } -} diff --git a/rpc/types/bridge.go b/rpc/types/bridge.go deleted file mode 100644 index eb8c64645..000000000 --- a/rpc/types/bridge.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -import ( - "github.com/0xPolygon/cdk/l1infotreesync" - tree "github.com/0xPolygon/cdk/tree/types" -) - -type ClaimProof struct { - ProofLocalExitRoot tree.Proof - ProofRollupExitRoot tree.Proof - L1InfoTreeLeaf l1infotreesync.L1InfoTreeLeaf -} diff --git a/scripts/local_config b/scripts/local_config index 90b5ae116..f052d927f 100755 --- a/scripts/local_config +++ b/scripts/local_config @@ -202,10 +202,7 @@ function export_values_of_cdk_node_config(){ if [ $? -ne 0 ]; then export_key_from_toml_file_or_fatal zkevm_rollup_fork_id $_CDK_CONFIG_FILE "." ForkId fi - export_key_from_toml_file zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE AggSender.SequencerPrivateKey Password - if [ $? 
-ne 0 ]; then - export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE "." SequencerPrivateKeyPassword - fi + export_key_from_toml_file_or_fatal zkevm_l2_agglayer_keystore_password $_CDK_CONFIG_FILE "." SequencerPrivateKeyPassword export_key_from_toml_file zkevm_bridge_address $_CDK_CONFIG_FILE BridgeL1Sync BridgeAddr if [ $? -ne 0 ]; then export_key_from_toml_file_or_fatal zkevm_bridge_address $_CDK_CONFIG_FILE "." polygonBridgeAddr @@ -436,9 +433,6 @@ cat << EOF "-components", "sequence-sender,aggregator", ] }, - - To run AggSender change components to: - "-components", "aggsender", EOF echo " -----------------------------------------------------------" diff --git a/sequencesender/config.go b/sequencesender/config.go index 4f77500b2..80f473621 100644 --- a/sequencesender/config.go +++ b/sequencesender/config.go @@ -2,8 +2,8 @@ package sequencesender import ( "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ) diff --git a/sequencesender/ethtx.go b/sequencesender/ethtx.go index 3234992e4..b71324fc0 100644 --- a/sequencesender/ethtx.go +++ b/sequencesender/ethtx.go @@ -11,9 +11,9 @@ import ( "sync/atomic" "time" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ) diff --git a/sequencesender/ethtx_test.go b/sequencesender/ethtx_test.go index 1bc21535a..5cf915c64 100644 --- a/sequencesender/ethtx_test.go +++ b/sequencesender/ethtx_test.go @@ -9,10 +9,10 @@ import ( "testing" "time" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/mocks" "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/log" 
"github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" diff --git a/sequencesender/mocks/mock_ethtxmanager.go b/sequencesender/mocks/mock_eth_tx_manager.go similarity index 80% rename from sequencesender/mocks/mock_ethtxmanager.go rename to sequencesender/mocks/mock_eth_tx_manager.go index b8a58d0d2..62c6352ea 100644 --- a/sequencesender/mocks/mock_ethtxmanager.go +++ b/sequencesender/mocks/mock_eth_tx_manager.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks @@ -28,6 +28,69 @@ func (_m *EthTxManagerMock) EXPECT() *EthTxManagerMock_Expecter { return &EthTxManagerMock_Expecter{mock: &_m.Mock} } +// Add provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar +func (_m *EthTxManagerMock) Add(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar) + + if len(ret) == 0 { + panic("no return value specified for Add") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthTxManagerMock_Add_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Add' 
+type EthTxManagerMock_Add_Call struct { + *mock.Call +} + +// Add is a helper method to define mock.On call +// - ctx context.Context +// - to *common.Address +// - value *big.Int +// - data []byte +// - gasOffset uint64 +// - sidecar *types.BlobTxSidecar +func (_e *EthTxManagerMock_Expecter) Add(ctx interface{}, to interface{}, value interface{}, data interface{}, gasOffset interface{}, sidecar interface{}) *EthTxManagerMock_Add_Call { + return &EthTxManagerMock_Add_Call{Call: _e.mock.On("Add", ctx, to, value, data, gasOffset, sidecar)} +} + +func (_c *EthTxManagerMock_Add_Call) Run(run func(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar)) *EthTxManagerMock_Add_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*common.Address), args[2].(*big.Int), args[3].([]byte), args[4].(uint64), args[5].(*types.BlobTxSidecar)) + }) + return _c +} + +func (_c *EthTxManagerMock_Add_Call) Return(_a0 common.Hash, _a1 error) *EthTxManagerMock_Add_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthTxManagerMock_Add_Call) RunAndReturn(run func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar) (common.Hash, error)) *EthTxManagerMock_Add_Call { + _c.Call.Return(run) + return _c +} + // AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas func (_m *EthTxManagerMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) diff --git a/sequencesender/mocks/mock_etherman.go b/sequencesender/mocks/mock_etherman.go index 298d96c3b..72bc1a023 100644 --- a/sequencesender/mocks/mock_etherman.go +++ b/sequencesender/mocks/mock_etherman.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. 
+// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/sequencesender/mocks/mock_rpc.go b/sequencesender/mocks/mock_rpc_interface.go similarity index 98% rename from sequencesender/mocks/mock_rpc.go rename to sequencesender/mocks/mock_rpc_interface.go index eef0188df..7a2b9950a 100644 --- a/sequencesender/mocks/mock_rpc.go +++ b/sequencesender/mocks/mock_rpc_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks diff --git a/sequencesender/mocks/mock_txbuilder.go b/sequencesender/mocks/mock_tx_builder.go similarity index 99% rename from sequencesender/mocks/mock_txbuilder.go rename to sequencesender/mocks/mock_tx_builder.go index 0607313b8..70738e5e8 100644 --- a/sequencesender/mocks/mock_txbuilder.go +++ b/sequencesender/mocks/mock_tx_builder.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.40.1. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks @@ -307,7 +307,7 @@ func (_c *TxBuilderMock_SetCondNewSeq_Call) RunAndReturn(run func(txbuilder.Cond return _c } -// String provides a mock function with given fields: +// String provides a mock function with no fields func (_m *TxBuilderMock) String() string { ret := _m.Called() diff --git a/sequencesender/sequencesender.go b/sequencesender/sequencesender.go index 6f602b421..41a56709b 100644 --- a/sequencesender/sequencesender.go +++ b/sequencesender/sequencesender.go @@ -11,7 +11,6 @@ import ( "time" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/rpc" "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" @@ -20,6 +19,7 @@ import ( "github.com/0xPolygon/zkevm-ethtx-manager/ethtxmanager" ethtxlog "github.com/0xPolygon/zkevm-ethtx-manager/log" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ethtypes 
"github.com/ethereum/go-ethereum/core/types" ) @@ -29,6 +29,13 @@ const ten = 10 // EthTxManager represents the eth tx manager interface type EthTxManager interface { Start() + Add(ctx context.Context, + to *common.Address, + value *big.Int, + data []byte, + gasOffset uint64, + sidecar *ethtypes.BlobTxSidecar, + ) (common.Hash, error) AddWithGas( ctx context.Context, to *common.Address, diff --git a/sequencesender/sequencesender_test.go b/sequencesender/sequencesender_test.go index e1d694e5b..052719d34 100644 --- a/sequencesender/sequencesender_test.go +++ b/sequencesender/sequencesender_test.go @@ -9,13 +9,13 @@ import ( types2 "github.com/0xPolygon/cdk/config/types" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" rpctypes "github.com/0xPolygon/cdk/rpc/types" "github.com/0xPolygon/cdk/sequencesender/mocks" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/mock" diff --git a/sequencesender/txbuilder/banana_base.go b/sequencesender/txbuilder/banana_base.go index ee21228d0..009149340 100644 --- a/sequencesender/txbuilder/banana_base.go +++ b/sequencesender/txbuilder/banana_base.go @@ -8,10 +8,10 @@ import ( cdkcommon "github.com/0xPolygon/cdk/common" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/banana_base_test.go 
b/sequencesender/txbuilder/banana_base_test.go index e59115005..dd2854b4d 100644 --- a/sequencesender/txbuilder/banana_base_test.go +++ b/sequencesender/txbuilder/banana_base_test.go @@ -6,12 +6,12 @@ import ( "math/big" "testing" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/banana_validium.go b/sequencesender/txbuilder/banana_validium.go index 68fa67622..586a44754 100644 --- a/sequencesender/txbuilder/banana_validium.go +++ b/sequencesender/txbuilder/banana_validium.go @@ -8,8 +8,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygonvalidiumetrog" "github.com/0xPolygon/cdk/dataavailability" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/banana_validium_test.go b/sequencesender/txbuilder/banana_validium_test.go index 71f059b98..19f6f1874 100644 --- a/sequencesender/txbuilder/banana_validium_test.go +++ b/sequencesender/txbuilder/banana_validium_test.go @@ -8,12 +8,12 @@ import ( "testing" "github.com/0xPolygon/cdk/dataavailability/mocks_da" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" 
"github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/banana_zkevm.go b/sequencesender/txbuilder/banana_zkevm.go index 42668323c..199da0438 100644 --- a/sequencesender/txbuilder/banana_zkevm.go +++ b/sequencesender/txbuilder/banana_zkevm.go @@ -6,8 +6,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/banana/polygonvalidiumetrog" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/banana_zkevm_test.go b/sequencesender/txbuilder/banana_zkevm_test.go index 4570729e7..4230783b8 100644 --- a/sequencesender/txbuilder/banana_zkevm_test.go +++ b/sequencesender/txbuilder/banana_zkevm_test.go @@ -7,12 +7,12 @@ import ( "strings" "testing" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/sequencesender/txbuilder/mocks_txbuilder" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/elderberry_base.go b/sequencesender/txbuilder/elderberry_base.go index 8e61e174b..7f2bffa52 100644 --- a/sequencesender/txbuilder/elderberry_base.go +++ 
b/sequencesender/txbuilder/elderberry_base.go @@ -4,9 +4,9 @@ import ( "context" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" ) diff --git a/sequencesender/txbuilder/elderberry_base_test.go b/sequencesender/txbuilder/elderberry_base_test.go index 806a47f87..1d518c605 100644 --- a/sequencesender/txbuilder/elderberry_base_test.go +++ b/sequencesender/txbuilder/elderberry_base_test.go @@ -4,9 +4,9 @@ import ( "context" "testing" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" diff --git a/sequencesender/txbuilder/elderberry_validium.go b/sequencesender/txbuilder/elderberry_validium.go index 62973b025..9f769e7e2 100644 --- a/sequencesender/txbuilder/elderberry_validium.go +++ b/sequencesender/txbuilder/elderberry_validium.go @@ -10,8 +10,8 @@ import ( "github.com/0xPolygon/cdk/dataavailability" "github.com/0xPolygon/cdk/etherman" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/elderberry_validium_test.go b/sequencesender/txbuilder/elderberry_validium_test.go index 6ca80a58b..92e22c367 100644 --- a/sequencesender/txbuilder/elderberry_validium_test.go +++ b/sequencesender/txbuilder/elderberry_validium_test.go @@ -10,10 +10,10 @@ import ( 
"github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/dataavailability/mocks_da" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" diff --git a/sequencesender/txbuilder/elderberry_zkevm.go b/sequencesender/txbuilder/elderberry_zkevm.go index a4d3bb567..c784d595e 100644 --- a/sequencesender/txbuilder/elderberry_zkevm.go +++ b/sequencesender/txbuilder/elderberry_zkevm.go @@ -7,8 +7,8 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/sequencesender/txbuilder/elderberry_zkevm_test.go b/sequencesender/txbuilder/elderberry_zkevm_test.go index 3544a7009..3afeba2de 100644 --- a/sequencesender/txbuilder/elderberry_zkevm_test.go +++ b/sequencesender/txbuilder/elderberry_zkevm_test.go @@ -8,10 +8,10 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/elderberry/polygonvalidiumetrog" "github.com/0xPolygon/cdk/etherman/contracts" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" "github.com/0xPolygon/cdk/sequencesender/txbuilder" "github.com/0xPolygon/cdk/state/datastream" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" diff --git 
a/sequencesender/txbuilder/mocks_txbuilder/cond_new_sequence.go b/sequencesender/txbuilder/mocks_txbuilder/mock_cond_new_sequence.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/cond_new_sequence.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_cond_new_sequence.go index ae818ce98..d38905526 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/cond_new_sequence.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_cond_new_sequence.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_contractor.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_contractor.go index 86fd4366b..1c952c660 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_zkevm_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_zkevm_contractor.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_zkevm_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_zkevm_contractor.go index 57c6c1576..7006660f3 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/global_exit_root_banana_zkevm_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_global_exit_root_banana_zkevm_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/l1_client.go b/sequencesender/txbuilder/mocks_txbuilder/mock_l1_client.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/l1_client.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_l1_client.go index 853494f98..c7ff99fc0 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/l1_client.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_l1_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go b/sequencesender/txbuilder/mocks_txbuilder/mock_l1_info_syncer.go similarity index 97% rename from sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_l1_info_syncer.go index 12d641a83..c88b38318 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/l1_info_syncer.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_l1_info_syncer.go @@ -1,11 +1,11 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_txbuilder import ( context "context" - l1infotreesync "github.com/0xPolygon/cdk/l1infotreesync" + l1infotreesync "github.com/agglayer/aggkit/l1infotreesync" mock "github.com/stretchr/testify/mock" ) diff --git a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_base_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_base_contractor.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/rollup_banana_base_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_base_contractor.go index acd82a4ee..4d06e8c8d 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_base_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_base_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_validium_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_validium_contractor.go similarity index 99% rename from sequencesender/txbuilder/mocks_txbuilder/rollup_banana_validium_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_validium_contractor.go index a59b88dd5..ef98103f9 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_validium_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_validium_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_zkevm_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_zkevm_contractor.go similarity index 99% rename from sequencesender/txbuilder/mocks_txbuilder/rollup_banana_zkevm_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_zkevm_contractor.go index e29e32529..77b70a13a 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/rollup_banana_zkevm_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_banana_zkevm_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_validium_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_validium_contractor.go similarity index 99% rename from sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_validium_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_validium_contractor.go index 0d94c0810..9b3d35d5a 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_validium_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_validium_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_zkevm_contractor.go b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_zkevm_contractor.go similarity index 98% rename from sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_zkevm_contractor.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_zkevm_contractor.go index 1ed208ab2..db5beac34 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/rollup_elderberry_zkevm_contractor.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_rollup_elderberry_zkevm_contractor.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package mocks_txbuilder diff --git a/sequencesender/txbuilder/mocks_txbuilder/tx_builder.go b/sequencesender/txbuilder/mocks_txbuilder/mock_tx_builder.go similarity index 99% rename from sequencesender/txbuilder/mocks_txbuilder/tx_builder.go rename to sequencesender/txbuilder/mocks_txbuilder/mock_tx_builder.go index 32ab8ab49..3eda5fdd3 100644 --- a/sequencesender/txbuilder/mocks_txbuilder/tx_builder.go +++ b/sequencesender/txbuilder/mocks_txbuilder/mock_tx_builder.go @@ -1,4 +1,4 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. 
package mocks_txbuilder diff --git a/sequencesender/txbuilder/validium_cond_num_batches.go b/sequencesender/txbuilder/validium_cond_num_batches.go index 35173d8e6..13434cc68 100644 --- a/sequencesender/txbuilder/validium_cond_num_batches.go +++ b/sequencesender/txbuilder/validium_cond_num_batches.go @@ -3,8 +3,8 @@ package txbuilder import ( "context" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ) diff --git a/sequencesender/txbuilder/zkevm_cond_max_size.go b/sequencesender/txbuilder/zkevm_cond_max_size.go index 66eb94461..100d1403a 100644 --- a/sequencesender/txbuilder/zkevm_cond_max_size.go +++ b/sequencesender/txbuilder/zkevm_cond_max_size.go @@ -6,8 +6,8 @@ import ( "fmt" "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/sequencesender/seqsendertypes" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/common" ) diff --git a/state/encoding_batch_v2.go b/state/encoding_batch_v2.go index f058f072f..947825edd 100644 --- a/state/encoding_batch_v2.go +++ b/state/encoding_batch_v2.go @@ -56,8 +56,8 @@ import ( "fmt" "strconv" - "github.com/0xPolygon/cdk/hex" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/hex" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) diff --git a/state/encoding_batch_v2_test.go b/state/encoding_batch_v2_test.go index d263b5c79..e8eb94545 100644 --- a/state/encoding_batch_v2_test.go +++ b/state/encoding_batch_v2_test.go @@ -3,8 +3,8 @@ package state import ( "testing" - "github.com/0xPolygon/cdk/hex" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/hex" + "github.com/agglayer/aggkit/log" "github.com/stretchr/testify/require" ) diff --git a/state/helper.go b/state/helper.go index 7f2b64be5..4ddd0dfcd 100644 --- a/state/helper.go +++ b/state/helper.go @@ -6,8 +6,8 @@ 
import ( "math/big" "strconv" - "github.com/0xPolygon/cdk/hex" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/hex" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rlp" ) diff --git a/sync/common.go b/sync/common.go deleted file mode 100644 index 9e1129690..000000000 --- a/sync/common.go +++ /dev/null @@ -1,31 +0,0 @@ -package sync - -import ( - "log" - "sync" - "time" -) - -type RetryHandler struct { - RetryAfterErrorPeriod time.Duration - MaxRetryAttemptsAfterError int -} - -func (h *RetryHandler) Handle(funcName string, attempts int) { - if h.MaxRetryAttemptsAfterError > -1 && attempts >= h.MaxRetryAttemptsAfterError { - log.Fatalf( - "%s failed too many times (%d)", - funcName, h.MaxRetryAttemptsAfterError, - ) - } - time.Sleep(h.RetryAfterErrorPeriod) -} - -func UnhaltIfAffectedRows(halted *bool, haltedReason *string, mu *sync.RWMutex, rowsAffected int64) { - if rowsAffected > 0 { - mu.Lock() - defer mu.Unlock() - *halted = false - *haltedReason = "" - } -} diff --git a/sync/driver.go b/sync/driver.go deleted file mode 100644 index 7d3068fbb..000000000 --- a/sync/driver.go +++ /dev/null @@ -1,22 +0,0 @@ -package sync - -import ( - "context" - "errors" - - "github.com/ethereum/go-ethereum/common" -) - -var ErrInconsistentState = errors.New("state is inconsistent, try again later once the state is consolidated") - -type Block struct { - Num uint64 - Events []interface{} - Hash common.Hash -} - -type ProcessorInterface interface { - GetLastProcessedBlock(ctx context.Context) (uint64, error) - ProcessBlock(block Block) error - Reorg(firstReorgedBlock uint64) error -} diff --git a/sync/evmdownloader.go b/sync/evmdownloader.go deleted file mode 100644 index 1de2be732..000000000 --- a/sync/evmdownloader.go +++ /dev/null @@ -1,386 +0,0 @@ -package sync - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - 
"github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -const ( - DefaultWaitPeriodBlockNotFound = time.Millisecond * 100 -) - -type EthClienter interface { - ethereum.LogFilterer - ethereum.BlockNumberReader - ethereum.ChainReader - bind.ContractBackend -} - -type EVMDownloaderInterface interface { - WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) (newLastBlock uint64) - GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) EVMBlocks - GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log - GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) - GetLastFinalizedBlock(ctx context.Context) (*types.Header, error) -} - -type LogAppenderMap map[common.Hash]func(b *EVMBlock, l types.Log) error - -type EVMDownloader struct { - syncBlockChunkSize uint64 - EVMDownloaderInterface - log *log.Logger - finalizedBlockType etherman.BlockNumberFinality -} - -func NewEVMDownloader( - syncerID string, - ethClient EthClienter, - syncBlockChunkSize uint64, - blockFinalityType etherman.BlockNumberFinality, - waitForNewBlocksPeriod time.Duration, - appender LogAppenderMap, - adressessToQuery []common.Address, - rh *RetryHandler, - finalizedBlockType etherman.BlockNumberFinality, -) (*EVMDownloader, error) { - logger := log.WithFields("syncer", syncerID) - finality, err := blockFinalityType.ToBlockNum() - if err != nil { - return nil, err - } - - topicsToQuery := make([]common.Hash, 0, len(appender)) - for topic := range appender { - topicsToQuery = append(topicsToQuery, topic) - } - - fbtEthermanType := finalizedBlockType - fbt, err := finalizedBlockType.ToBlockNum() - if err != nil { - return nil, err - } - - if fbt.Cmp(finality) > 0 { - // if someone configured the syncer to query blocks by Safe or Finalized block - // finalized block type should be at least the same as the block finality - fbt 
= finality - fbtEthermanType = blockFinalityType - logger.Warnf("finalized block type %s is greater than block finality %s, setting finalized block type to %s", - finalizedBlockType, blockFinalityType, fbtEthermanType) - } - - logger.Infof("downloader initialized with block finality: %s, finalized block type: %s. SyncChunkSize: %d", - blockFinalityType, fbtEthermanType, syncBlockChunkSize) - - return &EVMDownloader{ - syncBlockChunkSize: syncBlockChunkSize, - log: logger, - finalizedBlockType: fbtEthermanType, - EVMDownloaderInterface: &EVMDownloaderImplementation{ - ethClient: ethClient, - blockFinality: finality, - waitForNewBlocksPeriod: waitForNewBlocksPeriod, - appender: appender, - topicsToQuery: topicsToQuery, - adressessToQuery: adressessToQuery, - rh: rh, - log: logger, - finalizedBlockType: fbt, - }, - }, nil -} - -func (d *EVMDownloader) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { - lastBlock := d.WaitForNewBlocks(ctx, 0) - - for { - select { - case <-ctx.Done(): - d.log.Info("closing evm downloader channel") - close(downloadedCh) - return - default: - } - - toBlock := fromBlock + d.syncBlockChunkSize - if toBlock > lastBlock { - toBlock = lastBlock - } - - if fromBlock > toBlock { - d.log.Infof( - "waiting for new blocks, last block processed: %d, last block seen on L1: %d", - fromBlock-1, lastBlock, - ) - lastBlock = d.WaitForNewBlocks(ctx, fromBlock-1) - continue - } - - lastFinalizedBlock, err := d.GetLastFinalizedBlock(ctx) - if err != nil { - d.log.Error("error getting last finalized block: ", err) - continue - } - - lastFinalizedBlockNumber := lastFinalizedBlock.Number.Uint64() - - d.log.Infof("getting events from blocks %d to %d. 
lastFinalizedBlock: %d", - fromBlock, toBlock, lastFinalizedBlockNumber) - blocks := d.GetEventsByBlockRange(ctx, fromBlock, toBlock) - - if toBlock <= lastFinalizedBlockNumber { - d.reportBlocks(downloadedCh, blocks, lastFinalizedBlockNumber) - fromBlock = toBlock + 1 - - if blocks.Len() == 0 || blocks[blocks.Len()-1].Num < toBlock { - d.reportEmptyBlock(ctx, downloadedCh, toBlock, lastFinalizedBlockNumber) - } - } else { - d.reportBlocks(downloadedCh, blocks, lastFinalizedBlockNumber) - - if blocks.Len() == 0 { - if lastFinalizedBlockNumber > fromBlock && - lastFinalizedBlockNumber-fromBlock > d.syncBlockChunkSize { - d.reportEmptyBlock(ctx, downloadedCh, fromBlock+d.syncBlockChunkSize, lastFinalizedBlockNumber) - fromBlock += d.syncBlockChunkSize + 1 - } - } else { - fromBlock = blocks[blocks.Len()-1].Num + 1 - } - } - } -} - -func (d *EVMDownloader) reportBlocks(downloadedCh chan EVMBlock, blocks EVMBlocks, lastFinalizedBlock uint64) { - for _, block := range blocks { - d.log.Infof("sending block %d to the driver (with events)", block.Num) - block.IsFinalizedBlock = d.finalizedBlockType.IsFinalized() && block.Num <= lastFinalizedBlock - downloadedCh <- *block - } -} - -func (d *EVMDownloader) reportEmptyBlock(ctx context.Context, downloadedCh chan EVMBlock, - blockNum, lastFinalizedBlock uint64) { - // Indicate the last downloaded block if there are not events on it - d.log.Debugf("sending block %d to the driver (without events)", blockNum) - header, isCanceled := d.GetBlockHeader(ctx, blockNum) - if isCanceled { - return - } - - downloadedCh <- EVMBlock{ - IsFinalizedBlock: d.finalizedBlockType.IsFinalized() && header.Num <= lastFinalizedBlock, - EVMBlockHeader: header, - } -} - -type EVMDownloaderImplementation struct { - ethClient EthClienter - blockFinality *big.Int - waitForNewBlocksPeriod time.Duration - appender LogAppenderMap - topicsToQuery []common.Hash - adressessToQuery []common.Address - rh *RetryHandler - log *log.Logger - finalizedBlockType 
*big.Int -} - -func NewEVMDownloaderImplementation( - syncerID string, - ethClient EthClienter, - blockFinality *big.Int, - waitForNewBlocksPeriod time.Duration, - appender LogAppenderMap, - topicsToQuery []common.Hash, - adressessToQuery []common.Address, - rh *RetryHandler, -) *EVMDownloaderImplementation { - logger := log.WithFields("syncer", syncerID) - return &EVMDownloaderImplementation{ - ethClient: ethClient, - blockFinality: blockFinality, - waitForNewBlocksPeriod: waitForNewBlocksPeriod, - appender: appender, - topicsToQuery: topicsToQuery, - adressessToQuery: adressessToQuery, - rh: rh, - log: logger, - } -} - -func (d *EVMDownloaderImplementation) GetLastFinalizedBlock(ctx context.Context) (*types.Header, error) { - return d.ethClient.HeaderByNumber(ctx, d.finalizedBlockType) -} - -func (d *EVMDownloaderImplementation) WaitForNewBlocks( - ctx context.Context, lastBlockSeen uint64, -) (newLastBlock uint64) { - attempts := 0 - ticker := time.NewTicker(d.waitForNewBlocksPeriod) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - d.log.Info("context cancelled") - return lastBlockSeen - case <-ticker.C: - header, err := d.ethClient.HeaderByNumber(ctx, d.blockFinality) - if err != nil { - if ctx.Err() == nil { - attempts++ - d.log.Error("error getting last block num from eth client: ", err) - d.rh.Handle("waitForNewBlocks", attempts) - } else { - d.log.Warn("context has been canceled while trying to get header by number") - } - continue - } - if header.Number.Uint64() > lastBlockSeen { - return header.Number.Uint64() - } - } - } -} - -func (d *EVMDownloaderImplementation) GetEventsByBlockRange(ctx context.Context, fromBlock, toBlock uint64) EVMBlocks { - select { - case <-ctx.Done(): - return nil - default: - blocks := EVMBlocks{} - logs := d.GetLogs(ctx, fromBlock, toBlock) - for _, l := range logs { - if len(blocks) == 0 || blocks[len(blocks)-1].Num < l.BlockNumber { - b, canceled := d.GetBlockHeader(ctx, l.BlockNumber) - if canceled { - return 
nil - } - - if b.Hash != l.BlockHash { - d.log.Infof( - "there has been a block hash change between the event query and the block query "+ - "for block %d: %s vs %s. Retrying.", - l.BlockNumber, b.Hash, l.BlockHash, - ) - return d.GetEventsByBlockRange(ctx, fromBlock, toBlock) - } - blocks = append(blocks, &EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: l.BlockNumber, - Hash: l.BlockHash, - Timestamp: b.Timestamp, - ParentHash: b.ParentHash, - }, - Events: []interface{}{}, - }) - } - - for { - attempts := 0 - err := d.appender[l.Topics[0]](blocks[len(blocks)-1], l) - if err != nil { - attempts++ - d.log.Error("error trying to append log: ", err) - d.rh.Handle("getLogs", attempts) - continue - } - break - } - } - - return blocks - } -} - -func filterQueryToString(query ethereum.FilterQuery) string { - return fmt.Sprintf("FromBlock: %s, ToBlock: %s, Addresses: %s, Topics: %s", - query.FromBlock.String(), query.ToBlock.String(), query.Addresses, query.Topics) -} - -func (d *EVMDownloaderImplementation) GetLogs(ctx context.Context, fromBlock, toBlock uint64) []types.Log { - query := ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(fromBlock), - Addresses: d.adressessToQuery, - ToBlock: new(big.Int).SetUint64(toBlock), - } - var ( - attempts = 0 - unfilteredLogs []types.Log - err error - ) - for { - unfilteredLogs, err = d.ethClient.FilterLogs(ctx, query) - if err != nil { - if errors.Is(err, context.Canceled) { - // context is canceled, we don't want to fatal on max attempts in this case - return nil - } - - attempts++ - d.log.Errorf("error calling FilterLogs to eth client: filter: %s err:%w ", - filterQueryToString(query), - err, - ) - d.rh.Handle("getLogs", attempts) - continue - } - break - } - logs := make([]types.Log, 0, len(unfilteredLogs)) - for _, l := range unfilteredLogs { - for _, topic := range d.topicsToQuery { - if l.Topics[0] == topic { - logs = append(logs, l) - break - } - } - } - return logs -} - -func (d *EVMDownloaderImplementation) 
GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { - attempts := 0 - for { - header, err := d.ethClient.HeaderByNumber(ctx, new(big.Int).SetUint64(blockNum)) - if err != nil { - if errors.Is(err, context.Canceled) { - // context is canceled, we don't want to fatal on max attempts in this case - return EVMBlockHeader{}, true - } - if errors.Is(err, ethereum.NotFound) { - // block num can temporary disappear from the execution client due to a reorg, - // in this case, we want to wait and not panic - log.Warnf("block %d not found on the ethereum client: %v", blockNum, err) - if d.rh.RetryAfterErrorPeriod != 0 { - time.Sleep(d.rh.RetryAfterErrorPeriod) - } else { - time.Sleep(DefaultWaitPeriodBlockNotFound) - } - continue - } - - attempts++ - d.log.Errorf("error getting block header for block %d, err: %v", blockNum, err) - d.rh.Handle("getBlockHeader", attempts) - continue - } - return EVMBlockHeader{ - Num: header.Number.Uint64(), - Hash: header.Hash(), - ParentHash: header.ParentHash, - Timestamp: header.Time, - }, false - } -} diff --git a/sync/evmdownloader_test.go b/sync/evmdownloader_test.go deleted file mode 100644 index b4c600f16..000000000 --- a/sync/evmdownloader_test.go +++ /dev/null @@ -1,514 +0,0 @@ -package sync - -import ( - "context" - "errors" - "math/big" - "strconv" - "testing" - "time" - - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/log" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -var ( - contractAddr = common.HexToAddress("f00") - eventSignature = crypto.Keccak256Hash([]byte("foo")) -) - -const ( - syncBlockChunck = uint64(10) -) - -type testEvent common.Hash - -func TestGetEventsByBlockRange(t *testing.T) { - type testCase struct { - description string - 
inputLogs []types.Log - fromBlock, toBlock uint64 - expectedBlocks EVMBlocks - } - testCases := []testCase{} - ctx := context.Background() - d, clientMock := NewTestDownloader(t, time.Millisecond*100) - - // case 0: single block, no events - case0 := testCase{ - description: "case 0: single block, no events", - inputLogs: []types.Log{}, - fromBlock: 1, - toBlock: 3, - expectedBlocks: EVMBlocks{}, - } - testCases = append(testCases, case0) - - // case 1: single block, single event - logC1, updateC1 := generateEvent(3) - logsC1 := []types.Log{ - *logC1, - } - blocksC1 := EVMBlocks{ - { - EVMBlockHeader: EVMBlockHeader{ - Num: logC1.BlockNumber, - Hash: logC1.BlockHash, - ParentHash: common.HexToHash("foo"), - }, - Events: []interface{}{updateC1}, - }, - } - case1 := testCase{ - description: "case 1: single block, single event", - inputLogs: logsC1, - fromBlock: 3, - toBlock: 3, - expectedBlocks: blocksC1, - } - testCases = append(testCases, case1) - - // case 2: single block, multiple events - logC2_1, updateC2_1 := generateEvent(5) - logC2_2, updateC2_2 := generateEvent(5) - logC2_3, updateC2_3 := generateEvent(5) - logC2_4, updateC2_4 := generateEvent(5) - logsC2 := []types.Log{ - *logC2_1, - *logC2_2, - *logC2_3, - *logC2_4, - } - blocksC2 := []*EVMBlock{ - { - EVMBlockHeader: EVMBlockHeader{ - Num: logC2_1.BlockNumber, - Hash: logC2_1.BlockHash, - ParentHash: common.HexToHash("foo"), - }, - Events: []interface{}{ - updateC2_1, - updateC2_2, - updateC2_3, - updateC2_4, - }, - }, - } - case2 := testCase{ - description: "case 2: single block, multiple events", - inputLogs: logsC2, - fromBlock: 5, - toBlock: 5, - expectedBlocks: blocksC2, - } - testCases = append(testCases, case2) - - // case 3: multiple blocks, some events - logC3_1, updateC3_1 := generateEvent(7) - logC3_2, updateC3_2 := generateEvent(7) - logC3_3, updateC3_3 := generateEvent(8) - logC3_4, updateC3_4 := generateEvent(8) - logsC3 := []types.Log{ - *logC3_1, - *logC3_2, - *logC3_3, - *logC3_4, - } - 
blocksC3 := EVMBlocks{ - { - EVMBlockHeader: EVMBlockHeader{ - Num: logC3_1.BlockNumber, - Hash: logC3_1.BlockHash, - ParentHash: common.HexToHash("foo"), - }, - Events: []interface{}{ - updateC3_1, - updateC3_2, - }, - }, - { - EVMBlockHeader: EVMBlockHeader{ - Num: logC3_3.BlockNumber, - Hash: logC3_3.BlockHash, - ParentHash: common.HexToHash("foo"), - }, - Events: []interface{}{ - updateC3_3, - updateC3_4, - }, - }, - } - case3 := testCase{ - description: "case 3: multiple blocks, some events", - inputLogs: logsC3, - fromBlock: 7, - toBlock: 8, - expectedBlocks: blocksC3, - } - testCases = append(testCases, case3) - - for _, tc := range testCases { - query := ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(tc.fromBlock), - Addresses: []common.Address{contractAddr}, - ToBlock: new(big.Int).SetUint64(tc.toBlock), - } - clientMock. - On("FilterLogs", mock.Anything, query). - Return(tc.inputLogs, nil) - for _, b := range tc.expectedBlocks { - clientMock. - On("HeaderByNumber", mock.Anything, big.NewInt(int64(b.Num))). 
- Return(&types.Header{ - Number: big.NewInt(int64(b.Num)), - ParentHash: common.HexToHash("foo"), - }, nil) - } - - actualBlocks := d.GetEventsByBlockRange(ctx, tc.fromBlock, tc.toBlock) - require.Equal(t, tc.expectedBlocks, actualBlocks, tc.description) - } -} - -func generateEvent(blockNum uint32) (*types.Log, testEvent) { - h := common.HexToHash(strconv.Itoa(int(blockNum))) - header := types.Header{ - Number: big.NewInt(int64(blockNum)), - ParentHash: common.HexToHash("foo"), - } - blockHash := header.Hash() - log := &types.Log{ - Address: contractAddr, - BlockNumber: uint64(blockNum), - Topics: []common.Hash{ - eventSignature, - h, - }, - BlockHash: blockHash, - Data: nil, - } - return log, testEvent(h) -} - -func TestDownload(t *testing.T) { - /* - NOTE: due to the concurrent nature of this test (the function being tested runs through a goroutine) - if the mock doesn't match, the goroutine will get stuck and the test will timeout - */ - d := NewEVMDownloaderMock(t) - downloadCh := make(chan EVMBlock, 1) - ctx := context.Background() - ctx1, cancel := context.WithCancel(ctx) - expectedBlocks := EVMBlocks{} - dwnldr, _ := NewTestDownloader(t, time.Millisecond*100) - dwnldr.EVMDownloaderInterface = d - - d.On("WaitForNewBlocks", mock.Anything, uint64(0)). - Return(uint64(1)) - - lastFinalizedBlock := &types.Header{Number: big.NewInt(1)} - createEVMBlockFn := func(header *types.Header, isSafeBlock bool) *EVMBlock { - return &EVMBlock{ - IsFinalizedBlock: isSafeBlock, - EVMBlockHeader: EVMBlockHeader{ - Num: header.Number.Uint64(), - Hash: header.Hash(), - ParentHash: header.ParentHash, - Timestamp: header.Time, - }, - } - } - - // iteration 0: - // last block is 1, download that block (no events and wait) - b0 := createEVMBlockFn(lastFinalizedBlock, true) - expectedBlocks = append(expectedBlocks, b0) - d.On("GetLastFinalizedBlock", mock.Anything).Return(lastFinalizedBlock, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(0), uint64(1)). 
- Return(EVMBlocks{}, false).Once() - d.On("GetBlockHeader", mock.Anything, uint64(1)).Return(b0.EVMBlockHeader, false).Once() - - // iteration 1: we have a new block, so increase to block (no events) - lastFinalizedBlock = &types.Header{Number: big.NewInt(2)} - b2 := createEVMBlockFn(lastFinalizedBlock, true) - expectedBlocks = append(expectedBlocks, b2) - d.On("WaitForNewBlocks", mock.Anything, uint64(1)). - Return(uint64(2)) - d.On("GetLastFinalizedBlock", mock.Anything).Return(lastFinalizedBlock, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(2), uint64(2)). - Return(EVMBlocks{}, false).Once() - d.On("GetBlockHeader", mock.Anything, uint64(2)).Return(b2.EVMBlockHeader, false).Once() - - // iteration 2: wait for next block to be created (jump to block 8) - d.On("WaitForNewBlocks", mock.Anything, uint64(2)). - After(time.Millisecond * 100). - Return(uint64(8)).Once() - - // iteration 3: blocks 6 and 7 have events, last finalized block is 5 - lastFinalizedBlock = &types.Header{Number: big.NewInt(5)} - b6 := &EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 6, - Hash: common.HexToHash("06"), - }, - Events: []interface{}{"06"}, - } - b7 := &EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 7, - Hash: common.HexToHash("07"), - }, - Events: []interface{}{"07"}, - } - expectedBlocks = append(expectedBlocks, b6, b7) - d.On("GetLastFinalizedBlock", mock.Anything).Return(lastFinalizedBlock, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(3), uint64(8)). - Return(EVMBlocks{b6, b7}, false) - - // iteration 4: finalized block is now block 8, report the finalized block - lastFinalizedBlock = &types.Header{Number: big.NewInt(8)} - b8 := createEVMBlockFn(lastFinalizedBlock, true) - expectedBlocks = append(expectedBlocks, b8) - d.On("GetLastFinalizedBlock", mock.Anything).Return(lastFinalizedBlock, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(8), uint64(8)). 
- Return(EVMBlocks{}, false) - d.On("GetBlockHeader", mock.Anything, uint64(8)).Return(b8.EVMBlockHeader, false).Once() - - // iteration 5: from block 9 to 19, no events - lastFinalizedBlock = &types.Header{Number: big.NewInt(15)} - d.On("WaitForNewBlocks", mock.Anything, uint64(8)). - After(time.Millisecond * 100). - Return(uint64(19)).Once() - d.On("GetLastFinalizedBlock", mock.Anything).Return(lastFinalizedBlock, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return(EVMBlocks{}, false) - - // iteration 6: last finalized block is now 20, no events, report empty block - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(20)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(9), uint64(19)). - Return(EVMBlocks{}, false) - - d.On("WaitForNewBlocks", mock.Anything, uint64(19)). - After(time.Millisecond * 100). - Return(uint64(20)).Once() - b19 := createEVMBlockFn(&types.Header{Number: big.NewInt(19)}, true) - expectedBlocks = append(expectedBlocks, b19) - d.On("GetBlockHeader", mock.Anything, uint64(19)).Return(b19.EVMBlockHeader, false) // reporting empty finalized to block - - // iteration 8: last finalized block is 21, no events - b20 := createEVMBlockFn(&types.Header{Number: big.NewInt(20)}, true) - expectedBlocks = append(expectedBlocks, b20) - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(21)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(20), uint64(20)). - Return(EVMBlocks{}, false) - d.On("GetBlockHeader", mock.Anything, uint64(20)).Return(b20.EVMBlockHeader, false) // reporting empty finalized to block - - // iteration 9: last finalized block is 22, no events - d.On("WaitForNewBlocks", mock.Anything, uint64(20)). - After(time.Millisecond * 100). 
- Return(uint64(21)).Once() - b21 := createEVMBlockFn(&types.Header{Number: big.NewInt(21)}, true) - expectedBlocks = append(expectedBlocks, b21) - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(22)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(21), uint64(21)). - Return(EVMBlocks{}, false) - d.On("GetBlockHeader", mock.Anything, uint64(21)).Return(b21.EVMBlockHeader, false) // reporting empty finalized to block - - // iteration 10: last finalized block is 23, no events - d.On("WaitForNewBlocks", mock.Anything, uint64(21)). - After(time.Millisecond * 100). - Return(uint64(22)).Once() - b22 := createEVMBlockFn(&types.Header{Number: big.NewInt(22)}, true) - expectedBlocks = append(expectedBlocks, b22) - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(23)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(22), uint64(22)). - Return(EVMBlocks{}, false) - d.On("GetBlockHeader", mock.Anything, uint64(22)).Return(b22.EVMBlockHeader, false) // reporting empty finalized to block - - // iteration 11: last finalized block is still 23, no events - d.On("WaitForNewBlocks", mock.Anything, uint64(22)). - After(time.Millisecond * 100). - Return(uint64(23)).Once() - b23 := createEVMBlockFn(&types.Header{Number: big.NewInt(23)}, true) - expectedBlocks = append(expectedBlocks, b23) - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(23)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(23), uint64(23)). - Return(EVMBlocks{}, false) - d.On("GetBlockHeader", mock.Anything, uint64(23)).Return(b23.EVMBlockHeader, false) // reporting empty finalized to block - - // iteration 12: finalized block is 24, has events - d.On("WaitForNewBlocks", mock.Anything, uint64(23)). - After(time.Millisecond * 100). 
- Return(uint64(24)).Once() - b24 := &EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 24, - Hash: common.HexToHash("24"), - }, - Events: []interface{}{testEvent(common.HexToHash("24"))}, - } - expectedBlocks = append(expectedBlocks, b24) - d.On("GetLastFinalizedBlock", mock.Anything).Return(&types.Header{Number: big.NewInt(24)}, nil).Once() - d.On("GetEventsByBlockRange", mock.Anything, uint64(24), uint64(24)). - Return(EVMBlocks{b24}, false) - - // iteration 13: closing the downloader - d.On("WaitForNewBlocks", mock.Anything, uint64(24)).Return(uint64(25)).After(time.Millisecond * 100).Once() - - go dwnldr.Download(ctx1, 0, downloadCh) - for _, expectedBlock := range expectedBlocks { - actualBlock := <-downloadCh - log.Debugf("block %d received!", actualBlock.Num) - require.Equal(t, *expectedBlock, actualBlock) - } - log.Debug("canceling") - cancel() - _, ok := <-downloadCh - require.False(t, ok) -} - -func TestWaitForNewBlocks(t *testing.T) { - ctx := context.Background() - d, clientMock := NewTestDownloader(t, time.Millisecond*100) - - // at first attempt - currentBlock := uint64(5) - expectedBlock := uint64(6) - clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{ - Number: big.NewInt(6), - }, nil).Once() - actualBlock := d.WaitForNewBlocks(ctx, currentBlock) - assert.Equal(t, expectedBlock, actualBlock) - - // 2 iterations - clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{ - Number: big.NewInt(5), - }, nil).Once() - clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{ - Number: big.NewInt(6), - }, nil).Once() - actualBlock = d.WaitForNewBlocks(ctx, currentBlock) - assert.Equal(t, expectedBlock, actualBlock) - - // after error from client - clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(nil, errors.New("foo")).Once() - clientMock.On("HeaderByNumber", ctx, mock.Anything).Return(&types.Header{ - Number: big.NewInt(6), - }, nil).Once() - actualBlock = d.WaitForNewBlocks(ctx, 
currentBlock) - assert.Equal(t, expectedBlock, actualBlock) -} - -func TestGetBlockHeader(t *testing.T) { - ctx := context.Background() - d, clientMock := NewTestDownloader(t, time.Millisecond) - - blockNum := uint64(5) - blockNumBig := big.NewInt(5) - returnedBlock := &types.Header{ - Number: blockNumBig, - } - expectedBlock := EVMBlockHeader{ - Num: 5, - Hash: returnedBlock.Hash(), - } - - // at first attempt - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock, isCanceled := d.GetBlockHeader(ctx, blockNum) - assert.Equal(t, expectedBlock, actualBlock) - assert.False(t, isCanceled) - - // after error from client - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, errors.New("foo")).Once() - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock, isCanceled = d.GetBlockHeader(ctx, blockNum) - assert.Equal(t, expectedBlock, actualBlock) - assert.False(t, isCanceled) - - // header not found default - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, ethereum.NotFound).Once() - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock, isCanceled = d.GetBlockHeader(ctx, 5) - assert.Equal(t, expectedBlock, actualBlock) - assert.False(t, isCanceled) - - // header not found default TO - d, clientMock = NewTestDownloader(t, 0) - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(nil, ethereum.NotFound).Once() - clientMock.On("HeaderByNumber", ctx, blockNumBig).Return(returnedBlock, nil).Once() - actualBlock, isCanceled = d.GetBlockHeader(ctx, 5) - assert.Equal(t, expectedBlock, actualBlock) - assert.False(t, isCanceled) -} - -func TestFilterQueryToString(t *testing.T) { - addr1 := common.HexToAddress("0xf000") - addr2 := common.HexToAddress("0xabcd") - query := ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(1000), - Addresses: []common.Address{addr1, addr2}, - ToBlock: new(big.Int).SetUint64(1100), - } 
- - assert.Equal(t, "FromBlock: 1000, ToBlock: 1100, Addresses: [0x000000000000000000000000000000000000f000 0x000000000000000000000000000000000000ABcD], Topics: []", filterQueryToString(query)) - - query = ethereum.FilterQuery{ - FromBlock: new(big.Int).SetUint64(1000), - Addresses: []common.Address{addr1, addr2}, - ToBlock: new(big.Int).SetUint64(1100), - Topics: [][]common.Hash{{common.HexToHash("0x1234"), common.HexToHash("0x5678")}}, - } - assert.Equal(t, "FromBlock: 1000, ToBlock: 1100, Addresses: [0x000000000000000000000000000000000000f000 0x000000000000000000000000000000000000ABcD], Topics: [[0x0000000000000000000000000000000000000000000000000000000000001234 0x0000000000000000000000000000000000000000000000000000000000005678]]", filterQueryToString(query)) -} - -func TestGetLogs(t *testing.T) { - mockEthClient := NewL2Mock(t) - sut := EVMDownloaderImplementation{ - ethClient: mockEthClient, - adressessToQuery: []common.Address{contractAddr}, - log: log.WithFields("test", "EVMDownloaderImplementation"), - rh: &RetryHandler{ - RetryAfterErrorPeriod: time.Millisecond, - MaxRetryAttemptsAfterError: 5, - }, - } - ctx := context.TODO() - mockEthClient.EXPECT().FilterLogs(ctx, mock.Anything).Return(nil, errors.New("foo")).Once() - mockEthClient.EXPECT().FilterLogs(ctx, mock.Anything).Return(nil, nil).Once() - logs := sut.GetLogs(ctx, 0, 1) - require.Equal(t, []types.Log{}, logs) -} - -func buildAppender() LogAppenderMap { - appender := make(LogAppenderMap) - appender[eventSignature] = func(b *EVMBlock, l types.Log) error { - b.Events = append(b.Events, testEvent(l.Topics[1])) - return nil - } - return appender -} - -func NewTestDownloader(t *testing.T, retryPeriod time.Duration) (*EVMDownloader, *L2Mock) { - t.Helper() - - rh := &RetryHandler{ - MaxRetryAttemptsAfterError: 5, - RetryAfterErrorPeriod: retryPeriod, - } - clientMock := NewL2Mock(t) - d, err := NewEVMDownloader("test", - clientMock, syncBlockChunck, etherman.LatestBlock, time.Millisecond, - 
buildAppender(), []common.Address{contractAddr}, rh, - etherman.FinalizedBlock, - ) - require.NoError(t, err) - return d, clientMock -} diff --git a/sync/evmdriver.go b/sync/evmdriver.go deleted file mode 100644 index 4ac4c9cbf..000000000 --- a/sync/evmdriver.go +++ /dev/null @@ -1,191 +0,0 @@ -package sync - -import ( - "context" - "errors" - - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/ethereum/go-ethereum/common" -) - -type downloader interface { - Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) -} - -type EVMDriver struct { - reorgDetector ReorgDetector - reorgSub *reorgdetector.Subscription - processor processorInterface - downloader downloader - reorgDetectorID string - downloadBufferSize int - rh *RetryHandler - log *log.Logger -} - -type processorInterface interface { - GetLastProcessedBlock(ctx context.Context) (uint64, error) - ProcessBlock(ctx context.Context, block Block) error - Reorg(ctx context.Context, firstReorgedBlock uint64) error -} - -type ReorgDetector interface { - Subscribe(id string) (*reorgdetector.Subscription, error) - AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error -} - -func NewEVMDriver( - reorgDetector ReorgDetector, - processor processorInterface, - downloader downloader, - reorgDetectorID string, - downloadBufferSize int, - rh *RetryHandler, -) (*EVMDriver, error) { - logger := log.WithFields("syncer", reorgDetectorID) - reorgSub, err := reorgDetector.Subscribe(reorgDetectorID) - if err != nil { - return nil, err - } - return &EVMDriver{ - reorgDetector: reorgDetector, - reorgSub: reorgSub, - processor: processor, - downloader: downloader, - reorgDetectorID: reorgDetectorID, - downloadBufferSize: downloadBufferSize, - rh: rh, - log: logger, - }, nil -} - -func (d *EVMDriver) Sync(ctx context.Context) { -reset: - var ( - lastProcessedBlock uint64 - attempts int - err error - ) - - for { - lastProcessedBlock, err = 
d.processor.GetLastProcessedBlock(ctx) - if err != nil { - attempts++ - d.log.Error("error getting last processed block: ", err) - d.rh.Handle("Sync", attempts) - continue - } - break - } - cancellableCtx, cancel := context.WithCancel(ctx) - defer cancel() - - d.log.Infof("Starting sync... lastProcessedBlock %d", lastProcessedBlock) - // start downloading - downloadCh := make(chan EVMBlock, d.downloadBufferSize) - go d.downloader.Download(cancellableCtx, lastProcessedBlock+1, downloadCh) - - for { - select { - case <-ctx.Done(): - d.log.Info("sync stopped due to context done") - cancel() - return - case b, ok := <-downloadCh: - if ok { - // when channel is closing, it is sending an empty block with num = 0, and empty hash - // because it is not passing object by reference, but by value, so do not handle that since it is closing - d.log.Infof("handleNewBlock, blockNum: %d, blockHash: %s", b.Num, b.Hash) - d.handleNewBlock(ctx, cancel, b) - } - case firstReorgedBlock := <-d.reorgSub.ReorgedBlock: - d.log.Debug("handleReorg from block: ", firstReorgedBlock) - d.handleReorg(ctx, cancel, firstReorgedBlock) - goto reset - } - } -} - -func (d *EVMDriver) handleNewBlock(ctx context.Context, cancel context.CancelFunc, b EVMBlock) { - attempts := 0 - succeed := false - for { - select { - case <-ctx.Done(): - // If the context is canceled, exit the function - d.log.Warnf("context canceled while adding block %d to tracker", b.Num) - return - default: - if !b.IsFinalizedBlock { - err := d.reorgDetector.AddBlockToTrack(ctx, d.reorgDetectorID, b.Num, b.Hash) - if err != nil { - attempts++ - d.log.Errorf("error adding block %d to tracker: %v", b.Num, err) - d.rh.Handle("handleNewBlock", attempts) - } else { - succeed = true - } - } else { - succeed = true - } - } - if succeed { - break - } - } - attempts = 0 - succeed = false - for { - select { - case <-ctx.Done(): - // If the context is canceled, exit the function - d.log.Warnf("context canceled while processing block %d", b.Num) 
- return - default: - blockToProcess := Block{ - Num: b.Num, - Events: b.Events, - Hash: b.Hash, - } - err := d.processor.ProcessBlock(ctx, blockToProcess) - if err != nil { - if errors.Is(err, ErrInconsistentState) { - d.log.Warn("state got inconsistent after processing this block. Stopping downloader until there is a reorg") - cancel() - return - } - attempts++ - d.log.Errorf("error processing events for block %d, err: %v", b.Num, err) - d.rh.Handle("handleNewBlock", attempts) - } else { - succeed = true - } - } - if succeed { - break - } - } -} - -func (d *EVMDriver) handleReorg(ctx context.Context, cancel context.CancelFunc, firstReorgedBlock uint64) { - // stop downloader - cancel() - - // handle reorg - attempts := 0 - for { - err := d.processor.Reorg(ctx, firstReorgedBlock) - if err != nil { - attempts++ - d.log.Errorf( - "error processing reorg, last valid Block %d, err: %v", - firstReorgedBlock, err, - ) - d.rh.Handle("handleReorg", attempts) - continue - } - break - } - d.reorgSub.ReorgProcessed <- true -} diff --git a/sync/evmdriver_test.go b/sync/evmdriver_test.go deleted file mode 100644 index 9edbf0b2b..000000000 --- a/sync/evmdriver_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package sync - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -var ( - reorgDetectorID = "foo" -) - -func TestSync(t *testing.T) { - rh := &RetryHandler{ - MaxRetryAttemptsAfterError: 5, - RetryAfterErrorPeriod: time.Millisecond * 100, - } - rdm := NewReorgDetectorMock(t) - pm := NewProcessorMock(t) - dm := NewEVMDownloaderMock(t) - firstReorgedBlock := make(chan uint64) - reorgProcessed := make(chan bool) - rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{ - ReorgedBlock: firstReorgedBlock, - ReorgProcessed: reorgProcessed, - }, nil) - driver, 
err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh) - require.NoError(t, err) - ctx := context.Background() - expectedBlock1 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 3, - Hash: common.HexToHash("03"), - }, - } - expectedBlock2 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 9, - Hash: common.HexToHash("09"), - }, - } - type reorgSemaphore struct { - mu sync.Mutex - green bool - } - reorg1Completed := reorgSemaphore{} - dm.On("Download", mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { - ctx, ok := args.Get(0).(context.Context) - if !ok { - log.Error("failed to assert type for context") - return - } - - downloadedCh, ok := args.Get(2).(chan EVMBlock) - if !ok { - log.Error("failed to assert type for downloadedCh") - return - } - - log.Info("entering mock loop") - for { - select { - case <-ctx.Done(): - log.Info("closing channel") - close(downloadedCh) - return - default: - } - reorg1Completed.mu.Lock() - green := reorg1Completed.green - reorg1Completed.mu.Unlock() - if green { - downloadedCh <- expectedBlock2 - } else { - downloadedCh <- expectedBlock1 - } - time.Sleep(100 * time.Millisecond) - } - }) - - // Mocking this actions, the driver should "store" all the blocks from the downloader - pm.On("GetLastProcessedBlock", ctx). - Return(uint64(3), nil) - rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock1.Num, expectedBlock1.Hash). - Return(nil) - pm.On("ProcessBlock", ctx, Block{Num: expectedBlock1.Num, Events: expectedBlock1.Events, Hash: expectedBlock1.Hash}). - Return(nil) - rdm.On("AddBlockToTrack", ctx, reorgDetectorID, expectedBlock2.Num, expectedBlock2.Hash). - Return(nil) - pm.On("ProcessBlock", ctx, Block{Num: expectedBlock2.Num, Events: expectedBlock2.Events, Hash: expectedBlock2.Hash}). 
- Return(nil) - go driver.Sync(ctx) - time.Sleep(time.Millisecond * 200) // time to download expectedBlock1 - - // Trigger reorg 1 - reorgedBlock1 := uint64(5) - pm.On("Reorg", ctx, reorgedBlock1).Return(nil) - firstReorgedBlock <- reorgedBlock1 - ok := <-reorgProcessed - require.True(t, ok) - reorg1Completed.mu.Lock() - reorg1Completed.green = true - reorg1Completed.mu.Unlock() - time.Sleep(time.Millisecond * 200) // time to download expectedBlock2 - - // Trigger reorg 2: syncer restarts the porcess - reorgedBlock2 := uint64(7) - pm.On("Reorg", ctx, reorgedBlock2).Return(nil) - firstReorgedBlock <- reorgedBlock2 - ok = <-reorgProcessed - require.True(t, ok) -} - -func TestHandleNewBlock(t *testing.T) { - rh := &RetryHandler{ - MaxRetryAttemptsAfterError: 5, - RetryAfterErrorPeriod: time.Millisecond * 100, - } - rdm := NewReorgDetectorMock(t) - pm := NewProcessorMock(t) - dm := NewEVMDownloaderMock(t) - rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{}, nil) - driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh) - require.NoError(t, err) - ctx := context.Background() - - // happy path - b1 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 1, - Hash: common.HexToHash("f00"), - }, - } - rdm. - On("AddBlockToTrack", ctx, reorgDetectorID, b1.Num, b1.Hash). - Return(nil) - pm.On("ProcessBlock", ctx, Block{Num: b1.Num, Events: b1.Events, Hash: b1.Hash}). - Return(nil) - driver.handleNewBlock(ctx, nil, b1) - - // reorg deteector fails once - b2 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 2, - Hash: common.HexToHash("f00"), - }, - } - rdm. - On("AddBlockToTrack", ctx, reorgDetectorID, b2.Num, b2.Hash). - Return(errors.New("foo")).Once() - rdm. - On("AddBlockToTrack", ctx, reorgDetectorID, b2.Num, b2.Hash). - Return(nil).Once() - pm.On("ProcessBlock", ctx, Block{Num: b2.Num, Events: b2.Events, Hash: b2.Hash}). 
- Return(nil) - driver.handleNewBlock(ctx, nil, b2) - - // processor fails once - b3 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 3, - Hash: common.HexToHash("f00"), - }, - } - rdm. - On("AddBlockToTrack", ctx, reorgDetectorID, b3.Num, b3.Hash). - Return(nil) - pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events, Hash: b3.Hash}). - Return(errors.New("foo")).Once() - pm.On("ProcessBlock", ctx, Block{Num: b3.Num, Events: b3.Events, Hash: b3.Hash}). - Return(nil).Once() - driver.handleNewBlock(ctx, nil, b3) - - // inconsistent state error - b4 := EVMBlock{ - EVMBlockHeader: EVMBlockHeader{ - Num: 4, - Hash: common.HexToHash("f00"), - }, - } - rdm. - On("AddBlockToTrack", ctx, reorgDetectorID, b4.Num, b4.Hash). - Return(nil) - pm.On("ProcessBlock", ctx, Block{Num: b4.Num, Events: b4.Events, Hash: b4.Hash}). - Return(ErrInconsistentState) - cancelIsCalled := false - cancel := func() { - cancelIsCalled = true - } - driver.handleNewBlock(ctx, cancel, b4) - require.True(t, cancelIsCalled) -} - -func TestHandleReorg(t *testing.T) { - rh := &RetryHandler{ - MaxRetryAttemptsAfterError: 5, - RetryAfterErrorPeriod: time.Millisecond * 100, - } - rdm := NewReorgDetectorMock(t) - pm := NewProcessorMock(t) - dm := NewEVMDownloaderMock(t) - reorgProcessed := make(chan bool) - rdm.On("Subscribe", reorgDetectorID).Return(&reorgdetector.Subscription{ - ReorgProcessed: reorgProcessed, - }, nil) - driver, err := NewEVMDriver(rdm, pm, dm, reorgDetectorID, 10, rh) - require.NoError(t, err) - ctx := context.Background() - - // happy path - _, cancel := context.WithCancel(ctx) - firstReorgedBlock := uint64(5) - pm.On("Reorg", ctx, firstReorgedBlock).Return(nil) - go driver.handleReorg(ctx, cancel, firstReorgedBlock) - done := <-reorgProcessed - require.True(t, done) - - // processor fails 2 times - _, cancel = context.WithCancel(ctx) - firstReorgedBlock = uint64(7) - pm.On("Reorg", ctx, firstReorgedBlock).Return(errors.New("foo")).Once() - pm.On("Reorg", ctx, 
firstReorgedBlock).Return(errors.New("foo")).Once() - pm.On("Reorg", ctx, firstReorgedBlock).Return(nil).Once() - go driver.handleReorg(ctx, cancel, firstReorgedBlock) - done = <-reorgProcessed - require.True(t, done) -} diff --git a/sync/evmtypes.go b/sync/evmtypes.go deleted file mode 100644 index 739154f90..000000000 --- a/sync/evmtypes.go +++ /dev/null @@ -1,22 +0,0 @@ -package sync - -import "github.com/ethereum/go-ethereum/common" - -type EVMBlocks []*EVMBlock - -func (e EVMBlocks) Len() int { - return len(e) -} - -type EVMBlock struct { - EVMBlockHeader - IsFinalizedBlock bool - Events []interface{} -} - -type EVMBlockHeader struct { - Num uint64 - Hash common.Hash - ParentHash common.Hash - Timestamp uint64 -} diff --git a/sync/mock_downloader_test.go b/sync/mock_downloader_test.go deleted file mode 100644 index 43fed1ede..000000000 --- a/sync/mock_downloader_test.go +++ /dev/null @@ -1,276 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package sync - -import ( - context "context" - - types "github.com/ethereum/go-ethereum/core/types" - mock "github.com/stretchr/testify/mock" -) - -// EVMDownloaderMock is an autogenerated mock type for the evmDownloaderFull type -type EVMDownloaderMock struct { - mock.Mock -} - -type EVMDownloaderMock_Expecter struct { - mock *mock.Mock -} - -func (_m *EVMDownloaderMock) EXPECT() *EVMDownloaderMock_Expecter { - return &EVMDownloaderMock_Expecter{mock: &_m.Mock} -} - -// Download provides a mock function with given fields: ctx, fromBlock, downloadedCh -func (_m *EVMDownloaderMock) Download(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock) { - _m.Called(ctx, fromBlock, downloadedCh) -} - -// EVMDownloaderMock_Download_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Download' -type EVMDownloaderMock_Download_Call struct { - *mock.Call -} - -// Download is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - 
downloadedCh chan EVMBlock -func (_e *EVMDownloaderMock_Expecter) Download(ctx interface{}, fromBlock interface{}, downloadedCh interface{}) *EVMDownloaderMock_Download_Call { - return &EVMDownloaderMock_Download_Call{Call: _e.mock.On("Download", ctx, fromBlock, downloadedCh)} -} - -func (_c *EVMDownloaderMock_Download_Call) Run(run func(ctx context.Context, fromBlock uint64, downloadedCh chan EVMBlock)) *EVMDownloaderMock_Download_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(chan EVMBlock)) - }) - return _c -} - -func (_c *EVMDownloaderMock_Download_Call) Return() *EVMDownloaderMock_Download_Call { - _c.Call.Return() - return _c -} - -func (_c *EVMDownloaderMock_Download_Call) RunAndReturn(run func(context.Context, uint64, chan EVMBlock)) *EVMDownloaderMock_Download_Call { - _c.Run(run) - return _c -} - -// GetBlockHeader provides a mock function with given fields: ctx, blockNum -func (_m *EVMDownloaderMock) GetBlockHeader(ctx context.Context, blockNum uint64) (EVMBlockHeader, bool) { - ret := _m.Called(ctx, blockNum) - - if len(ret) == 0 { - panic("no return value specified for GetBlockHeader") - } - - var r0 EVMBlockHeader - var r1 bool - if rf, ok := ret.Get(0).(func(context.Context, uint64) (EVMBlockHeader, bool)); ok { - return rf(ctx, blockNum) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64) EVMBlockHeader); ok { - r0 = rf(ctx, blockNum) - } else { - r0 = ret.Get(0).(EVMBlockHeader) - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64) bool); ok { - r1 = rf(ctx, blockNum) - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} - -// EVMDownloaderMock_GetBlockHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockHeader' -type EVMDownloaderMock_GetBlockHeader_Call struct { - *mock.Call -} - -// GetBlockHeader is a helper method to define mock.On call -// - ctx context.Context -// - blockNum uint64 -func (_e 
*EVMDownloaderMock_Expecter) GetBlockHeader(ctx interface{}, blockNum interface{}) *EVMDownloaderMock_GetBlockHeader_Call { - return &EVMDownloaderMock_GetBlockHeader_Call{Call: _e.mock.On("GetBlockHeader", ctx, blockNum)} -} - -func (_c *EVMDownloaderMock_GetBlockHeader_Call) Run(run func(ctx context.Context, blockNum uint64)) *EVMDownloaderMock_GetBlockHeader_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64)) - }) - return _c -} - -func (_c *EVMDownloaderMock_GetBlockHeader_Call) Return(_a0 EVMBlockHeader, _a1 bool) *EVMDownloaderMock_GetBlockHeader_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *EVMDownloaderMock_GetBlockHeader_Call) RunAndReturn(run func(context.Context, uint64) (EVMBlockHeader, bool)) *EVMDownloaderMock_GetBlockHeader_Call { - _c.Call.Return(run) - return _c -} - -// GetEventsByBlockRange provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *EVMDownloaderMock) GetEventsByBlockRange(ctx context.Context, fromBlock uint64, toBlock uint64) EVMBlocks { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetEventsByBlockRange") - } - - var r0 EVMBlocks - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) EVMBlocks); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(EVMBlocks) - } - } - - return r0 -} - -// GetLastFinalizedBlock provides a mock function with given fields: ctx -func (_m *EVMDownloaderMock) GetLastFinalizedBlock(ctx context.Context) (*types.Header, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLastFinalizedBlock") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*types.Header, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *types.Header); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = 
ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetLogs provides a mock function with given fields: ctx, fromBlock, toBlock -func (_m *EVMDownloaderMock) GetLogs(ctx context.Context, fromBlock uint64, toBlock uint64) []types.Log { - ret := _m.Called(ctx, fromBlock, toBlock) - - if len(ret) == 0 { - panic("no return value specified for GetLogs") - } - - var r0 []types.Log - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64) []types.Log); ok { - r0 = rf(ctx, fromBlock, toBlock) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Log) - } - } - - return r0 -} - -// EVMDownloaderMock_GetLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLogs' -type EVMDownloaderMock_GetLogs_Call struct { - *mock.Call -} - -// GetLogs is a helper method to define mock.On call -// - ctx context.Context -// - fromBlock uint64 -// - toBlock uint64 -func (_e *EVMDownloaderMock_Expecter) GetLogs(ctx interface{}, fromBlock interface{}, toBlock interface{}) *EVMDownloaderMock_GetLogs_Call { - return &EVMDownloaderMock_GetLogs_Call{Call: _e.mock.On("GetLogs", ctx, fromBlock, toBlock)} -} - -func (_c *EVMDownloaderMock_GetLogs_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock uint64)) *EVMDownloaderMock_GetLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64), args[2].(uint64)) - }) - return _c -} - -func (_c *EVMDownloaderMock_GetLogs_Call) Return(_a0 []types.Log) *EVMDownloaderMock_GetLogs_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *EVMDownloaderMock_GetLogs_Call) RunAndReturn(run func(context.Context, uint64, uint64) []types.Log) *EVMDownloaderMock_GetLogs_Call { - _c.Call.Return(run) - return _c -} - -// WaitForNewBlocks provides a mock function with given fields: ctx, lastBlockSeen -func (_m *EVMDownloaderMock) 
WaitForNewBlocks(ctx context.Context, lastBlockSeen uint64) uint64 { - ret := _m.Called(ctx, lastBlockSeen) - - if len(ret) == 0 { - panic("no return value specified for WaitForNewBlocks") - } - - var r0 uint64 - if rf, ok := ret.Get(0).(func(context.Context, uint64) uint64); ok { - r0 = rf(ctx, lastBlockSeen) - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} - -// EVMDownloaderMock_WaitForNewBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitForNewBlocks' -type EVMDownloaderMock_WaitForNewBlocks_Call struct { - *mock.Call -} - -// WaitForNewBlocks is a helper method to define mock.On call -// - ctx context.Context -// - lastBlockSeen uint64 -func (_e *EVMDownloaderMock_Expecter) WaitForNewBlocks(ctx interface{}, lastBlockSeen interface{}) *EVMDownloaderMock_WaitForNewBlocks_Call { - return &EVMDownloaderMock_WaitForNewBlocks_Call{Call: _e.mock.On("WaitForNewBlocks", ctx, lastBlockSeen)} -} - -func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) Run(run func(ctx context.Context, lastBlockSeen uint64)) *EVMDownloaderMock_WaitForNewBlocks_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64)) - }) - return _c -} - -func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) Return(newLastBlock uint64) *EVMDownloaderMock_WaitForNewBlocks_Call { - _c.Call.Return(newLastBlock) - return _c -} - -func (_c *EVMDownloaderMock_WaitForNewBlocks_Call) RunAndReturn(run func(context.Context, uint64) uint64) *EVMDownloaderMock_WaitForNewBlocks_Call { - _c.Call.Return(run) - return _c -} - -// NewEVMDownloaderMock creates a new instance of EVMDownloaderMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewEVMDownloaderMock(t interface { - mock.TestingT - Cleanup(func()) -}) *EVMDownloaderMock { - mock := &EVMDownloaderMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/sync/mock_l2_test.go b/sync/mock_l2_test.go deleted file mode 100644 index 955af0dbb..000000000 --- a/sync/mock_l2_test.go +++ /dev/null @@ -1,1086 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package sync - -import ( - context "context" - big "math/big" - - common "github.com/ethereum/go-ethereum/common" - - ethereum "github.com/ethereum/go-ethereum" - - mock "github.com/stretchr/testify/mock" - - types "github.com/ethereum/go-ethereum/core/types" -) - -// L2Mock is an autogenerated mock type for the EthClienter type -type L2Mock struct { - mock.Mock -} - -type L2Mock_Expecter struct { - mock *mock.Mock -} - -func (_m *L2Mock) EXPECT() *L2Mock_Expecter { - return &L2Mock_Expecter{mock: &_m.Mock} -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *L2Mock) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no return value specified for BlockByHash") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Block, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Block); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_BlockByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByHash' -type L2Mock_BlockByHash_Call struct { - *mock.Call -} - -// BlockByHash is a helper method to define mock.On call -// - ctx context.Context -// - 
hash common.Hash -func (_e *L2Mock_Expecter) BlockByHash(ctx interface{}, hash interface{}) *L2Mock_BlockByHash_Call { - return &L2Mock_BlockByHash_Call{Call: _e.mock.On("BlockByHash", ctx, hash)} -} - -func (_c *L2Mock_BlockByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *L2Mock_BlockByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2Mock_BlockByHash_Call) Return(_a0 *types.Block, _a1 error) *L2Mock_BlockByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_BlockByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Block, error)) *L2Mock_BlockByHash_Call { - _c.Call.Return(run) - return _c -} - -// BlockByNumber provides a mock function with given fields: ctx, number -func (_m *L2Mock) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for BlockByNumber") - } - - var r0 *types.Block - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Block, error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Block); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockByNumber' -type L2Mock_BlockByNumber_Call struct { - *mock.Call -} - -// BlockByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *L2Mock_Expecter) BlockByNumber(ctx interface{}, number interface{}) *L2Mock_BlockByNumber_Call { - return &L2Mock_BlockByNumber_Call{Call: 
_e.mock.On("BlockByNumber", ctx, number)} -} - -func (_c *L2Mock_BlockByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *L2Mock_BlockByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *L2Mock_BlockByNumber_Call) Return(_a0 *types.Block, _a1 error) *L2Mock_BlockByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_BlockByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Block, error)) *L2Mock_BlockByNumber_Call { - _c.Call.Return(run) - return _c -} - -// BlockNumber provides a mock function with given fields: ctx -func (_m *L2Mock) BlockNumber(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for BlockNumber") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_BlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BlockNumber' -type L2Mock_BlockNumber_Call struct { - *mock.Call -} - -// BlockNumber is a helper method to define mock.On call -// - ctx context.Context -func (_e *L2Mock_Expecter) BlockNumber(ctx interface{}) *L2Mock_BlockNumber_Call { - return &L2Mock_BlockNumber_Call{Call: _e.mock.On("BlockNumber", ctx)} -} - -func (_c *L2Mock_BlockNumber_Call) Run(run func(ctx context.Context)) *L2Mock_BlockNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2Mock_BlockNumber_Call) Return(_a0 uint64, _a1 error) *L2Mock_BlockNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func 
(_c *L2Mock_BlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *L2Mock_BlockNumber_Call { - _c.Call.Return(run) - return _c -} - -// CallContract provides a mock function with given fields: ctx, call, blockNumber -func (_m *L2Mock) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, call, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CallContract") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)); ok { - return rf(ctx, call, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg, *big.Int) []byte); ok { - r0 = rf(ctx, call, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg, *big.Int) error); ok { - r1 = rf(ctx, call, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_CallContract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CallContract' -type L2Mock_CallContract_Call struct { - *mock.Call -} - -// CallContract is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -// - blockNumber *big.Int -func (_e *L2Mock_Expecter) CallContract(ctx interface{}, call interface{}, blockNumber interface{}) *L2Mock_CallContract_Call { - return &L2Mock_CallContract_Call{Call: _e.mock.On("CallContract", ctx, call, blockNumber)} -} - -func (_c *L2Mock_CallContract_Call) Run(run func(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int)) *L2Mock_CallContract_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg), args[2].(*big.Int)) - }) - return _c -} - -func (_c *L2Mock_CallContract_Call) Return(_a0 []byte, _a1 error) *L2Mock_CallContract_Call { - _c.Call.Return(_a0, _a1) - 
return _c -} - -func (_c *L2Mock_CallContract_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg, *big.Int) ([]byte, error)) *L2Mock_CallContract_Call { - _c.Call.Return(run) - return _c -} - -// CodeAt provides a mock function with given fields: ctx, contract, blockNumber -func (_m *L2Mock) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - ret := _m.Called(ctx, contract, blockNumber) - - if len(ret) == 0 { - panic("no return value specified for CodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) ([]byte, error)); ok { - return rf(ctx, contract, blockNumber) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address, *big.Int) []byte); ok { - r0 = rf(ctx, contract, blockNumber) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address, *big.Int) error); ok { - r1 = rf(ctx, contract, blockNumber) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_CodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CodeAt' -type L2Mock_CodeAt_Call struct { - *mock.Call -} - -// CodeAt is a helper method to define mock.On call -// - ctx context.Context -// - contract common.Address -// - blockNumber *big.Int -func (_e *L2Mock_Expecter) CodeAt(ctx interface{}, contract interface{}, blockNumber interface{}) *L2Mock_CodeAt_Call { - return &L2Mock_CodeAt_Call{Call: _e.mock.On("CodeAt", ctx, contract, blockNumber)} -} - -func (_c *L2Mock_CodeAt_Call) Run(run func(ctx context.Context, contract common.Address, blockNumber *big.Int)) *L2Mock_CodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address), args[2].(*big.Int)) - }) - return _c -} - -func (_c *L2Mock_CodeAt_Call) Return(_a0 []byte, _a1 error) *L2Mock_CodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - 
-func (_c *L2Mock_CodeAt_Call) RunAndReturn(run func(context.Context, common.Address, *big.Int) ([]byte, error)) *L2Mock_CodeAt_Call { - _c.Call.Return(run) - return _c -} - -// EstimateGas provides a mock function with given fields: ctx, call -func (_m *L2Mock) EstimateGas(ctx context.Context, call ethereum.CallMsg) (uint64, error) { - ret := _m.Called(ctx, call) - - if len(ret) == 0 { - panic("no return value specified for EstimateGas") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) (uint64, error)); ok { - return rf(ctx, call) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.CallMsg) uint64); ok { - r0 = rf(ctx, call) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.CallMsg) error); ok { - r1 = rf(ctx, call) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_EstimateGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EstimateGas' -type L2Mock_EstimateGas_Call struct { - *mock.Call -} - -// EstimateGas is a helper method to define mock.On call -// - ctx context.Context -// - call ethereum.CallMsg -func (_e *L2Mock_Expecter) EstimateGas(ctx interface{}, call interface{}) *L2Mock_EstimateGas_Call { - return &L2Mock_EstimateGas_Call{Call: _e.mock.On("EstimateGas", ctx, call)} -} - -func (_c *L2Mock_EstimateGas_Call) Run(run func(ctx context.Context, call ethereum.CallMsg)) *L2Mock_EstimateGas_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.CallMsg)) - }) - return _c -} - -func (_c *L2Mock_EstimateGas_Call) Return(_a0 uint64, _a1 error) *L2Mock_EstimateGas_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_EstimateGas_Call) RunAndReturn(run func(context.Context, ethereum.CallMsg) (uint64, error)) *L2Mock_EstimateGas_Call { - _c.Call.Return(run) - return _c -} - -// FilterLogs provides a mock function with given 
fields: ctx, q -func (_m *L2Mock) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - ret := _m.Called(ctx, q) - - if len(ret) == 0 { - panic("no return value specified for FilterLogs") - } - - var r0 []types.Log - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) ([]types.Log, error)); ok { - return rf(ctx, q) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery) []types.Log); ok { - r0 = rf(ctx, q) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Log) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery) error); ok { - r1 = rf(ctx, q) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_FilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FilterLogs' -type L2Mock_FilterLogs_Call struct { - *mock.Call -} - -// FilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -func (_e *L2Mock_Expecter) FilterLogs(ctx interface{}, q interface{}) *L2Mock_FilterLogs_Call { - return &L2Mock_FilterLogs_Call{Call: _e.mock.On("FilterLogs", ctx, q)} -} - -func (_c *L2Mock_FilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery)) *L2Mock_FilterLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery)) - }) - return _c -} - -func (_c *L2Mock_FilterLogs_Call) Return(_a0 []types.Log, _a1 error) *L2Mock_FilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_FilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery) ([]types.Log, error)) *L2Mock_FilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByHash provides a mock function with given fields: ctx, hash -func (_m *L2Mock) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - ret := _m.Called(ctx, hash) - - if len(ret) == 0 { - panic("no 
return value specified for HeaderByHash") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (*types.Header, error)); ok { - return rf(ctx, hash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) *types.Header); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_HeaderByHash_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByHash' -type L2Mock_HeaderByHash_Call struct { - *mock.Call -} - -// HeaderByHash is a helper method to define mock.On call -// - ctx context.Context -// - hash common.Hash -func (_e *L2Mock_Expecter) HeaderByHash(ctx interface{}, hash interface{}) *L2Mock_HeaderByHash_Call { - return &L2Mock_HeaderByHash_Call{Call: _e.mock.On("HeaderByHash", ctx, hash)} -} - -func (_c *L2Mock_HeaderByHash_Call) Run(run func(ctx context.Context, hash common.Hash)) *L2Mock_HeaderByHash_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2Mock_HeaderByHash_Call) Return(_a0 *types.Header, _a1 error) *L2Mock_HeaderByHash_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_HeaderByHash_Call) RunAndReturn(run func(context.Context, common.Hash) (*types.Header, error)) *L2Mock_HeaderByHash_Call { - _c.Call.Return(run) - return _c -} - -// HeaderByNumber provides a mock function with given fields: ctx, number -func (_m *L2Mock) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - ret := _m.Called(ctx, number) - - if len(ret) == 0 { - panic("no return value specified for HeaderByNumber") - } - - var r0 *types.Header - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) (*types.Header, 
error)); ok { - return rf(ctx, number) - } - if rf, ok := ret.Get(0).(func(context.Context, *big.Int) *types.Header); ok { - r0 = rf(ctx, number) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Header) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *big.Int) error); ok { - r1 = rf(ctx, number) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_HeaderByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HeaderByNumber' -type L2Mock_HeaderByNumber_Call struct { - *mock.Call -} - -// HeaderByNumber is a helper method to define mock.On call -// - ctx context.Context -// - number *big.Int -func (_e *L2Mock_Expecter) HeaderByNumber(ctx interface{}, number interface{}) *L2Mock_HeaderByNumber_Call { - return &L2Mock_HeaderByNumber_Call{Call: _e.mock.On("HeaderByNumber", ctx, number)} -} - -func (_c *L2Mock_HeaderByNumber_Call) Run(run func(ctx context.Context, number *big.Int)) *L2Mock_HeaderByNumber_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*big.Int)) - }) - return _c -} - -func (_c *L2Mock_HeaderByNumber_Call) Return(_a0 *types.Header, _a1 error) *L2Mock_HeaderByNumber_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_HeaderByNumber_Call) RunAndReturn(run func(context.Context, *big.Int) (*types.Header, error)) *L2Mock_HeaderByNumber_Call { - _c.Call.Return(run) - return _c -} - -// PendingCodeAt provides a mock function with given fields: ctx, account -func (_m *L2Mock) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingCodeAt") - } - - var r0 []byte - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) ([]byte, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) []byte); ok { - r0 = rf(ctx, account) - } 
else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_PendingCodeAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingCodeAt' -type L2Mock_PendingCodeAt_Call struct { - *mock.Call -} - -// PendingCodeAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *L2Mock_Expecter) PendingCodeAt(ctx interface{}, account interface{}) *L2Mock_PendingCodeAt_Call { - return &L2Mock_PendingCodeAt_Call{Call: _e.mock.On("PendingCodeAt", ctx, account)} -} - -func (_c *L2Mock_PendingCodeAt_Call) Run(run func(ctx context.Context, account common.Address)) *L2Mock_PendingCodeAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *L2Mock_PendingCodeAt_Call) Return(_a0 []byte, _a1 error) *L2Mock_PendingCodeAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_PendingCodeAt_Call) RunAndReturn(run func(context.Context, common.Address) ([]byte, error)) *L2Mock_PendingCodeAt_Call { - _c.Call.Return(run) - return _c -} - -// PendingNonceAt provides a mock function with given fields: ctx, account -func (_m *L2Mock) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - ret := _m.Called(ctx, account) - - if len(ret) == 0 { - panic("no return value specified for PendingNonceAt") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Address) (uint64, error)); ok { - return rf(ctx, account) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Address) uint64); ok { - r0 = rf(ctx, account) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Address) error); ok { - r1 = rf(ctx, account) - } else 
{ - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_PendingNonceAt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PendingNonceAt' -type L2Mock_PendingNonceAt_Call struct { - *mock.Call -} - -// PendingNonceAt is a helper method to define mock.On call -// - ctx context.Context -// - account common.Address -func (_e *L2Mock_Expecter) PendingNonceAt(ctx interface{}, account interface{}) *L2Mock_PendingNonceAt_Call { - return &L2Mock_PendingNonceAt_Call{Call: _e.mock.On("PendingNonceAt", ctx, account)} -} - -func (_c *L2Mock_PendingNonceAt_Call) Run(run func(ctx context.Context, account common.Address)) *L2Mock_PendingNonceAt_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Address)) - }) - return _c -} - -func (_c *L2Mock_PendingNonceAt_Call) Return(_a0 uint64, _a1 error) *L2Mock_PendingNonceAt_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_PendingNonceAt_Call) RunAndReturn(run func(context.Context, common.Address) (uint64, error)) *L2Mock_PendingNonceAt_Call { - _c.Call.Return(run) - return _c -} - -// SendTransaction provides a mock function with given fields: ctx, tx -func (_m *L2Mock) SendTransaction(ctx context.Context, tx *types.Transaction) error { - ret := _m.Called(ctx, tx) - - if len(ret) == 0 { - panic("no return value specified for SendTransaction") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, *types.Transaction) error); ok { - r0 = rf(ctx, tx) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// L2Mock_SendTransaction_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendTransaction' -type L2Mock_SendTransaction_Call struct { - *mock.Call -} - -// SendTransaction is a helper method to define mock.On call -// - ctx context.Context -// - tx *types.Transaction -func (_e *L2Mock_Expecter) SendTransaction(ctx interface{}, tx interface{}) *L2Mock_SendTransaction_Call { 
- return &L2Mock_SendTransaction_Call{Call: _e.mock.On("SendTransaction", ctx, tx)} -} - -func (_c *L2Mock_SendTransaction_Call) Run(run func(ctx context.Context, tx *types.Transaction)) *L2Mock_SendTransaction_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*types.Transaction)) - }) - return _c -} - -func (_c *L2Mock_SendTransaction_Call) Return(_a0 error) *L2Mock_SendTransaction_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *L2Mock_SendTransaction_Call) RunAndReturn(run func(context.Context, *types.Transaction) error) *L2Mock_SendTransaction_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeFilterLogs provides a mock function with given fields: ctx, q, ch -func (_m *L2Mock) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - ret := _m.Called(ctx, q, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeFilterLogs") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)); ok { - return rf(ctx, q, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) ethereum.Subscription); ok { - r0 = rf(ctx, q, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, ethereum.FilterQuery, chan<- types.Log) error); ok { - r1 = rf(ctx, q, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_SubscribeFilterLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeFilterLogs' -type L2Mock_SubscribeFilterLogs_Call struct { - *mock.Call -} - -// SubscribeFilterLogs is a helper method to define mock.On call -// - ctx context.Context -// - q ethereum.FilterQuery -// - ch chan<- types.Log -func (_e *L2Mock_Expecter) 
SubscribeFilterLogs(ctx interface{}, q interface{}, ch interface{}) *L2Mock_SubscribeFilterLogs_Call { - return &L2Mock_SubscribeFilterLogs_Call{Call: _e.mock.On("SubscribeFilterLogs", ctx, q, ch)} -} - -func (_c *L2Mock_SubscribeFilterLogs_Call) Run(run func(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log)) *L2Mock_SubscribeFilterLogs_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(ethereum.FilterQuery), args[2].(chan<- types.Log)) - }) - return _c -} - -func (_c *L2Mock_SubscribeFilterLogs_Call) Return(_a0 ethereum.Subscription, _a1 error) *L2Mock_SubscribeFilterLogs_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_SubscribeFilterLogs_Call) RunAndReturn(run func(context.Context, ethereum.FilterQuery, chan<- types.Log) (ethereum.Subscription, error)) *L2Mock_SubscribeFilterLogs_Call { - _c.Call.Return(run) - return _c -} - -// SubscribeNewHead provides a mock function with given fields: ctx, ch -func (_m *L2Mock) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) { - ret := _m.Called(ctx, ch) - - if len(ret) == 0 { - panic("no return value specified for SubscribeNewHead") - } - - var r0 ethereum.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)); ok { - return rf(ctx, ch) - } - if rf, ok := ret.Get(0).(func(context.Context, chan<- *types.Header) ethereum.Subscription); ok { - r0 = rf(ctx, ch) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ethereum.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, chan<- *types.Header) error); ok { - r1 = rf(ctx, ch) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_SubscribeNewHead_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SubscribeNewHead' -type L2Mock_SubscribeNewHead_Call struct { - *mock.Call -} - -// SubscribeNewHead is a helper 
method to define mock.On call -// - ctx context.Context -// - ch chan<- *types.Header -func (_e *L2Mock_Expecter) SubscribeNewHead(ctx interface{}, ch interface{}) *L2Mock_SubscribeNewHead_Call { - return &L2Mock_SubscribeNewHead_Call{Call: _e.mock.On("SubscribeNewHead", ctx, ch)} -} - -func (_c *L2Mock_SubscribeNewHead_Call) Run(run func(ctx context.Context, ch chan<- *types.Header)) *L2Mock_SubscribeNewHead_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(chan<- *types.Header)) - }) - return _c -} - -func (_c *L2Mock_SubscribeNewHead_Call) Return(_a0 ethereum.Subscription, _a1 error) *L2Mock_SubscribeNewHead_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_SubscribeNewHead_Call) RunAndReturn(run func(context.Context, chan<- *types.Header) (ethereum.Subscription, error)) *L2Mock_SubscribeNewHead_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasPrice provides a mock function with given fields: ctx -func (_m *L2Mock) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasPrice") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_SuggestGasPrice_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasPrice' -type L2Mock_SuggestGasPrice_Call struct { - *mock.Call -} - -// SuggestGasPrice is a helper method to define mock.On call -// - ctx context.Context -func (_e *L2Mock_Expecter) SuggestGasPrice(ctx interface{}) *L2Mock_SuggestGasPrice_Call { - return 
&L2Mock_SuggestGasPrice_Call{Call: _e.mock.On("SuggestGasPrice", ctx)} -} - -func (_c *L2Mock_SuggestGasPrice_Call) Run(run func(ctx context.Context)) *L2Mock_SuggestGasPrice_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2Mock_SuggestGasPrice_Call) Return(_a0 *big.Int, _a1 error) *L2Mock_SuggestGasPrice_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_SuggestGasPrice_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *L2Mock_SuggestGasPrice_Call { - _c.Call.Return(run) - return _c -} - -// SuggestGasTipCap provides a mock function with given fields: ctx -func (_m *L2Mock) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SuggestGasTipCap") - } - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*big.Int, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *big.Int); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_SuggestGasTipCap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SuggestGasTipCap' -type L2Mock_SuggestGasTipCap_Call struct { - *mock.Call -} - -// SuggestGasTipCap is a helper method to define mock.On call -// - ctx context.Context -func (_e *L2Mock_Expecter) SuggestGasTipCap(ctx interface{}) *L2Mock_SuggestGasTipCap_Call { - return &L2Mock_SuggestGasTipCap_Call{Call: _e.mock.On("SuggestGasTipCap", ctx)} -} - -func (_c *L2Mock_SuggestGasTipCap_Call) Run(run func(ctx context.Context)) *L2Mock_SuggestGasTipCap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *L2Mock_SuggestGasTipCap_Call) Return(_a0 
*big.Int, _a1 error) *L2Mock_SuggestGasTipCap_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_SuggestGasTipCap_Call) RunAndReturn(run func(context.Context) (*big.Int, error)) *L2Mock_SuggestGasTipCap_Call { - _c.Call.Return(run) - return _c -} - -// TransactionCount provides a mock function with given fields: ctx, blockHash -func (_m *L2Mock) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) { - ret := _m.Called(ctx, blockHash) - - if len(ret) == 0 { - panic("no return value specified for TransactionCount") - } - - var r0 uint - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (uint, error)); ok { - return rf(ctx, blockHash) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash) uint); ok { - r0 = rf(ctx, blockHash) - } else { - r0 = ret.Get(0).(uint) - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, blockHash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_TransactionCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionCount' -type L2Mock_TransactionCount_Call struct { - *mock.Call -} - -// TransactionCount is a helper method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -func (_e *L2Mock_Expecter) TransactionCount(ctx interface{}, blockHash interface{}) *L2Mock_TransactionCount_Call { - return &L2Mock_TransactionCount_Call{Call: _e.mock.On("TransactionCount", ctx, blockHash)} -} - -func (_c *L2Mock_TransactionCount_Call) Run(run func(ctx context.Context, blockHash common.Hash)) *L2Mock_TransactionCount_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash)) - }) - return _c -} - -func (_c *L2Mock_TransactionCount_Call) Return(_a0 uint, _a1 error) *L2Mock_TransactionCount_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_TransactionCount_Call) RunAndReturn(run 
func(context.Context, common.Hash) (uint, error)) *L2Mock_TransactionCount_Call { - _c.Call.Return(run) - return _c -} - -// TransactionInBlock provides a mock function with given fields: ctx, blockHash, index -func (_m *L2Mock) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) { - ret := _m.Called(ctx, blockHash, index) - - if len(ret) == 0 { - panic("no return value specified for TransactionInBlock") - } - - var r0 *types.Transaction - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) (*types.Transaction, error)); ok { - return rf(ctx, blockHash, index) - } - if rf, ok := ret.Get(0).(func(context.Context, common.Hash, uint) *types.Transaction); ok { - r0 = rf(ctx, blockHash, index) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Transaction) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, common.Hash, uint) error); ok { - r1 = rf(ctx, blockHash, index) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// L2Mock_TransactionInBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TransactionInBlock' -type L2Mock_TransactionInBlock_Call struct { - *mock.Call -} - -// TransactionInBlock is a helper method to define mock.On call -// - ctx context.Context -// - blockHash common.Hash -// - index uint -func (_e *L2Mock_Expecter) TransactionInBlock(ctx interface{}, blockHash interface{}, index interface{}) *L2Mock_TransactionInBlock_Call { - return &L2Mock_TransactionInBlock_Call{Call: _e.mock.On("TransactionInBlock", ctx, blockHash, index)} -} - -func (_c *L2Mock_TransactionInBlock_Call) Run(run func(ctx context.Context, blockHash common.Hash, index uint)) *L2Mock_TransactionInBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(common.Hash), args[2].(uint)) - }) - return _c -} - -func (_c *L2Mock_TransactionInBlock_Call) Return(_a0 *types.Transaction, _a1 error) 
*L2Mock_TransactionInBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *L2Mock_TransactionInBlock_Call) RunAndReturn(run func(context.Context, common.Hash, uint) (*types.Transaction, error)) *L2Mock_TransactionInBlock_Call { - _c.Call.Return(run) - return _c -} - -// NewL2Mock creates a new instance of L2Mock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewL2Mock(t interface { - mock.TestingT - Cleanup(func()) -}) *L2Mock { - mock := &L2Mock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/sync/mock_processor_test.go b/sync/mock_processor_test.go deleted file mode 100644 index 96ece8d42..000000000 --- a/sync/mock_processor_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package sync - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" -) - -// ProcessorMock is an autogenerated mock type for the processorInterface type -type ProcessorMock struct { - mock.Mock -} - -type ProcessorMock_Expecter struct { - mock *mock.Mock -} - -func (_m *ProcessorMock) EXPECT() *ProcessorMock_Expecter { - return &ProcessorMock_Expecter{mock: &_m.Mock} -} - -// GetLastProcessedBlock provides a mock function with given fields: ctx -func (_m *ProcessorMock) GetLastProcessedBlock(ctx context.Context) (uint64, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for GetLastProcessedBlock") - } - - var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { - r0 = rf(ctx) - } else { - r0 = ret.Get(0).(uint64) - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
ProcessorMock_GetLastProcessedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastProcessedBlock' -type ProcessorMock_GetLastProcessedBlock_Call struct { - *mock.Call -} - -// GetLastProcessedBlock is a helper method to define mock.On call -// - ctx context.Context -func (_e *ProcessorMock_Expecter) GetLastProcessedBlock(ctx interface{}) *ProcessorMock_GetLastProcessedBlock_Call { - return &ProcessorMock_GetLastProcessedBlock_Call{Call: _e.mock.On("GetLastProcessedBlock", ctx)} -} - -func (_c *ProcessorMock_GetLastProcessedBlock_Call) Run(run func(ctx context.Context)) *ProcessorMock_GetLastProcessedBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *ProcessorMock_GetLastProcessedBlock_Call) Return(_a0 uint64, _a1 error) *ProcessorMock_GetLastProcessedBlock_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ProcessorMock_GetLastProcessedBlock_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ProcessorMock_GetLastProcessedBlock_Call { - _c.Call.Return(run) - return _c -} - -// ProcessBlock provides a mock function with given fields: ctx, block -func (_m *ProcessorMock) ProcessBlock(ctx context.Context, block Block) error { - ret := _m.Called(ctx, block) - - if len(ret) == 0 { - panic("no return value specified for ProcessBlock") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, Block) error); ok { - r0 = rf(ctx, block) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ProcessorMock_ProcessBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBlock' -type ProcessorMock_ProcessBlock_Call struct { - *mock.Call -} - -// ProcessBlock is a helper method to define mock.On call -// - ctx context.Context -// - block Block -func (_e *ProcessorMock_Expecter) ProcessBlock(ctx interface{}, block interface{}) *ProcessorMock_ProcessBlock_Call { - return 
&ProcessorMock_ProcessBlock_Call{Call: _e.mock.On("ProcessBlock", ctx, block)} -} - -func (_c *ProcessorMock_ProcessBlock_Call) Run(run func(ctx context.Context, block Block)) *ProcessorMock_ProcessBlock_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(Block)) - }) - return _c -} - -func (_c *ProcessorMock_ProcessBlock_Call) Return(_a0 error) *ProcessorMock_ProcessBlock_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ProcessorMock_ProcessBlock_Call) RunAndReturn(run func(context.Context, Block) error) *ProcessorMock_ProcessBlock_Call { - _c.Call.Return(run) - return _c -} - -// Reorg provides a mock function with given fields: ctx, firstReorgedBlock -func (_m *ProcessorMock) Reorg(ctx context.Context, firstReorgedBlock uint64) error { - ret := _m.Called(ctx, firstReorgedBlock) - - if len(ret) == 0 { - panic("no return value specified for Reorg") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, uint64) error); ok { - r0 = rf(ctx, firstReorgedBlock) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ProcessorMock_Reorg_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Reorg' -type ProcessorMock_Reorg_Call struct { - *mock.Call -} - -// Reorg is a helper method to define mock.On call -// - ctx context.Context -// - firstReorgedBlock uint64 -func (_e *ProcessorMock_Expecter) Reorg(ctx interface{}, firstReorgedBlock interface{}) *ProcessorMock_Reorg_Call { - return &ProcessorMock_Reorg_Call{Call: _e.mock.On("Reorg", ctx, firstReorgedBlock)} -} - -func (_c *ProcessorMock_Reorg_Call) Run(run func(ctx context.Context, firstReorgedBlock uint64)) *ProcessorMock_Reorg_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(uint64)) - }) - return _c -} - -func (_c *ProcessorMock_Reorg_Call) Return(_a0 error) *ProcessorMock_Reorg_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ProcessorMock_Reorg_Call) 
RunAndReturn(run func(context.Context, uint64) error) *ProcessorMock_Reorg_Call { - _c.Call.Return(run) - return _c -} - -// NewProcessorMock creates a new instance of ProcessorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewProcessorMock(t interface { - mock.TestingT - Cleanup(func()) -}) *ProcessorMock { - mock := &ProcessorMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/sync/mock_reorgdetector_test.go b/sync/mock_reorgdetector_test.go deleted file mode 100644 index 43551baa2..000000000 --- a/sync/mock_reorgdetector_test.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package sync - -import ( - context "context" - - common "github.com/ethereum/go-ethereum/common" - - mock "github.com/stretchr/testify/mock" - - reorgdetector "github.com/0xPolygon/cdk/reorgdetector" -) - -// ReorgDetectorMock is an autogenerated mock type for the ReorgDetector type -type ReorgDetectorMock struct { - mock.Mock -} - -type ReorgDetectorMock_Expecter struct { - mock *mock.Mock -} - -func (_m *ReorgDetectorMock) EXPECT() *ReorgDetectorMock_Expecter { - return &ReorgDetectorMock_Expecter{mock: &_m.Mock} -} - -// AddBlockToTrack provides a mock function with given fields: ctx, id, blockNum, blockHash -func (_m *ReorgDetectorMock) AddBlockToTrack(ctx context.Context, id string, blockNum uint64, blockHash common.Hash) error { - ret := _m.Called(ctx, id, blockNum, blockHash) - - if len(ret) == 0 { - panic("no return value specified for AddBlockToTrack") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint64, common.Hash) error); ok { - r0 = rf(ctx, id, blockNum, blockHash) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ReorgDetectorMock_AddBlockToTrack_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'AddBlockToTrack' -type ReorgDetectorMock_AddBlockToTrack_Call struct { - *mock.Call -} - -// AddBlockToTrack is a helper method to define mock.On call -// - ctx context.Context -// - id string -// - blockNum uint64 -// - blockHash common.Hash -func (_e *ReorgDetectorMock_Expecter) AddBlockToTrack(ctx interface{}, id interface{}, blockNum interface{}, blockHash interface{}) *ReorgDetectorMock_AddBlockToTrack_Call { - return &ReorgDetectorMock_AddBlockToTrack_Call{Call: _e.mock.On("AddBlockToTrack", ctx, id, blockNum, blockHash)} -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Run(run func(ctx context.Context, id string, blockNum uint64, blockHash common.Hash)) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(common.Hash)) - }) - return _c -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) Return(_a0 error) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *ReorgDetectorMock_AddBlockToTrack_Call) RunAndReturn(run func(context.Context, string, uint64, common.Hash) error) *ReorgDetectorMock_AddBlockToTrack_Call { - _c.Call.Return(run) - return _c -} - -// Subscribe provides a mock function with given fields: id -func (_m *ReorgDetectorMock) Subscribe(id string) (*reorgdetector.Subscription, error) { - ret := _m.Called(id) - - if len(ret) == 0 { - panic("no return value specified for Subscribe") - } - - var r0 *reorgdetector.Subscription - var r1 error - if rf, ok := ret.Get(0).(func(string) (*reorgdetector.Subscription, error)); ok { - return rf(id) - } - if rf, ok := ret.Get(0).(func(string) *reorgdetector.Subscription); ok { - r0 = rf(id) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*reorgdetector.Subscription) - } - } - - if rf, ok := ret.Get(1).(func(string) error); ok { - r1 = rf(id) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// 
ReorgDetectorMock_Subscribe_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Subscribe' -type ReorgDetectorMock_Subscribe_Call struct { - *mock.Call -} - -// Subscribe is a helper method to define mock.On call -// - id string -func (_e *ReorgDetectorMock_Expecter) Subscribe(id interface{}) *ReorgDetectorMock_Subscribe_Call { - return &ReorgDetectorMock_Subscribe_Call{Call: _e.mock.On("Subscribe", id)} -} - -func (_c *ReorgDetectorMock_Subscribe_Call) Run(run func(id string)) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(string)) - }) - return _c -} - -func (_c *ReorgDetectorMock_Subscribe_Call) Return(_a0 *reorgdetector.Subscription, _a1 error) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *ReorgDetectorMock_Subscribe_Call) RunAndReturn(run func(string) (*reorgdetector.Subscription, error)) *ReorgDetectorMock_Subscribe_Call { - _c.Call.Return(run) - return _c -} - -// NewReorgDetectorMock creates a new instance of ReorgDetectorMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewReorgDetectorMock(t interface { - mock.TestingT - Cleanup(func()) -}) *ReorgDetectorMock { - mock := &ReorgDetectorMock{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/test/Makefile b/test/Makefile index 2e81a453a..b05b83ce5 100644 --- a/test/Makefile +++ b/test/Makefile @@ -1,105 +1,28 @@ -.PHONY: generate-mocks -generate-mocks: generate-mocks-bridgesync generate-mocks-reorgdetector \ - generate-mocks-sequencesender generate-mocks-da \ - generate-mocks-l1infotreesync generate-mocks-helpers \ - generate-mocks-sync generate-mocks-aggregator \ - generate-mocks-aggsender generate-mocks-agglayer - -.PHONY: generate-mocks-bridgesync -generate-mocks-bridgesync: ## Generates mocks for bridgesync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../bridgesync --output ../bridgesync/mocks --outpkg mocks_bridgesync ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-reorgdetector -generate-mocks-reorgdetector: ## Generates mocks for reorgdetector, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClient --dir=../reorgdetector --output=../reorgdetector --outpkg=reorgdetector --inpackage --structname=EthClientMock --filename=mock_eth_client.go ${COMMON_MOCKERY_PARAMS} - -COMMON_MOCKERY_PARAMS=--disable-version-string --with-expecter --exported -.PHONY: generate-mocks-sequencesender -generate-mocks-sequencesender: ## Generates mocks for sequencesender, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../sequencesender/txbuilder --output ../sequencesender/txbuilder/mocks_txbuilder --outpkg mocks_txbuilder ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthTxManagerMock 
--filename=mock_ethtxmanager.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../sequencesender --output=../sequencesender/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-da -generate-mocks-da: ## Generates mocks for dataavailability, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../dataavailability --output ../dataavailability/mocks_da --outpkg mocks_da ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-rpc -generate-mocks-rpc: ## Generates mocks for rpc, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../rpc --output ../rpc/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-l1infotreesync -generate-mocks-l1infotreesync: ## Generates mocks for l1infotreesync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../l1infotreesync --output ../l1infotreesync/mocks --outpkg mocks_l1infotreesync ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../l1infotreesync/mocks --outpkg=mocks_l1infotreesync --structname=ReorgDetectorMock --filename=mock_reorgdetector.go ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-helpers -generate-mocks-helpers: ## Generates mocks for helpers, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir=../aggoracle/chaingersender --output=./helpers --outpkg=helpers --structname=EthTxManagerMock 
--filename=mock_ethtxmanager.go ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-aggoracle -generate-mocks-aggoracle: ## Generates mocks for aggoracle, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManager --dir ../aggoracle/chaingersender --output ../aggoracle/mocks --outpkg mocks --structname=EthTxManagerMock --filename=mock_ethtxmanager.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=L2GERManager --dir ../aggoracle/chaingersender --output ../aggoracle/mocks --outpkg mocks --structname=L2GERManagerMock --filename=mock_l2germanager.go ${COMMON_MOCKERY_PARAMS} +COMMON_MOCKERY_PARAMS=--with-expecter --exported -.PHONY: generate-mocks-sync -generate-mocks-sync: ## Generates mocks for sync, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthClienter --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=L2Mock --filename=mock_l2_test.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=evmDownloaderFull --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=EVMDownloaderMock --filename=mock_downloader_test.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=processorInterface --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ProcessorMock --filename=mock_processor_test.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ReorgDetector --dir=../sync --output=../sync --outpkg=sync --inpackage --structname=ReorgDetectorMock --filename=mock_reorgdetector_test.go ${COMMON_MOCKERY_PARAMS} - - -.PHONY: generate-mocks-aggregator -generate-mocks-aggregator: ## Generates mocks for aggregator, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=ProverInterface --dir=../aggregator 
--output=../aggregator/mocks --outpkg=mocks --structname=ProverInterfaceMock --filename=mock_prover.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Etherman --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthermanMock --filename=mock_etherman.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=StorageInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=StorageInterfaceMock --filename=mock_storage.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Synchronizer --srcpkg=github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer --output=../aggregator/mocks --outpkg=mocks --structname=SynchronizerInterfaceMock --filename=mock_synchronizer.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=EthTxManagerClient --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=EthTxManagerClientMock --filename=mock_eth_tx_manager.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=Txer --dir=../db --output=../aggregator/mocks --outpkg=mocks --structname=TxerMock --filename=mock_txer.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=RPCInterface --dir=../aggregator --output=../aggregator/mocks --outpkg=mocks --structname=RPCInterfaceMock --filename=mock_rpc.go ${COMMON_MOCKERY_PARAMS} - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AggregatorService_ChannelServer --dir=../aggregator/prover --output=../aggregator/prover/mocks --outpkg=mocks --structname=ChannelMock --filename=mock_channel.go ${COMMON_MOCKERY_PARAMS} - - -.PHONY: generate-mocks-aggsender -generate-mocks-aggsender: ## Generates mocks for aggsender, using mockery tool - export "GOROOT=$$(go env 
GOROOT)" && $$(go env GOPATH)/bin/mockery --all --case snake --dir ../aggsender --output ../aggsender/mocks --outpkg mocks ${COMMON_MOCKERY_PARAMS} - -.PHONY: generate-mocks-agglayer -generate-mocks-agglayer: ## Generates mocks for agglayer, using mockery tool - export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/mockery --name=AgglayerClientInterface --dir=../agglayer --output=../agglayer --outpkg=agglayer --inpackage --structname=AgglayerClientMock --filename=mock_agglayer_client.go ${COMMON_MOCKERY_PARAMS} +.PHONY: generate-mocks +generate-mocks: + mockery ${COMMON_MOCKERY_PARAMS} .PHONY: test-e2e-fork9-validium test-e2e-fork9-validium: stop ./run-e2e.sh fork9 cdk-validium - bats bats/fep/ + bats bats/ .PHONY: test-e2e-fork11-rollup test-e2e-fork11-rollup: stop ./run-e2e.sh fork11 rollup - bats bats/fep/ + bats bats/ .PHONY: test-e2e-fork12-validium test-e2e-fork12-validium: stop ./run-e2e.sh fork12 cdk-validium - bats bats/fep/ + bats bats/ .PHONY: test-e2e-fork12-rollup test-e2e-fork12-rollup: stop ./run-e2e.sh fork12 rollup - bats bats/fep/ - -.PHONY: test-e2e-fork12-pessimistic -test-e2e-fork12-pessimistic: stop - ./run-e2e.sh fork12 pessimistic - bats bats/pp/bridge-e2e.bats bats/pp/e2e-pp.bats - -.PHONY: test-e2e-fork12-multi-pessimistic -test-e2e-fork12-multi-pessimistic: stop - ./run-e2e-multi_pp.sh - bats bats/pp-multi + bats bats/ .PHONY: stop stop: diff --git a/test/aggregator/mocks/mock_synchronizer.go b/test/aggregator/mocks/mock_synchronizer.go new file mode 100644 index 000000000..371344fc6 --- /dev/null +++ b/test/aggregator/mocks/mock_synchronizer.go @@ -0,0 +1,697 @@ +// Code generated by mockery v2.52.2. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + mock "github.com/stretchr/testify/mock" + + synchronizer "github.com/0xPolygonHermez/zkevm-synchronizer-l1/synchronizer" +) + +// SynchronizerInterfaceMock is an autogenerated mock type for the Synchronizer type +type SynchronizerInterfaceMock struct { + mock.Mock +} + +type SynchronizerInterfaceMock_Expecter struct { + mock *mock.Mock +} + +func (_m *SynchronizerInterfaceMock) EXPECT() *SynchronizerInterfaceMock_Expecter { + return &SynchronizerInterfaceMock_Expecter{mock: &_m.Mock} +} + +// GetL1BlockByNumber provides a mock function with given fields: ctx, blockNumber +func (_m *SynchronizerInterfaceMock) GetL1BlockByNumber(ctx context.Context, blockNumber uint64) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx, blockNumber) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockByNumber") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.L1Block, error)); ok { + return rf(ctx, blockNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.L1Block); ok { + r0 = rf(ctx, blockNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, blockNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetL1BlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1BlockByNumber' +type SynchronizerInterfaceMock_GetL1BlockByNumber_Call struct { + *mock.Call +} + +// GetL1BlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +func (_e *SynchronizerInterfaceMock_Expecter) GetL1BlockByNumber(ctx interface{}, blockNumber interface{}) *SynchronizerInterfaceMock_GetL1BlockByNumber_Call { + 
return &SynchronizerInterfaceMock_GetL1BlockByNumber_Call{Call: _e.mock.On("GetL1BlockByNumber", ctx, blockNumber)} +} + +func (_c *SynchronizerInterfaceMock_GetL1BlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64)) *SynchronizerInterfaceMock_GetL1BlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1BlockByNumber_Call) Return(_a0 *synchronizer.L1Block, _a1 error) *SynchronizerInterfaceMock_GetL1BlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1BlockByNumber_Call) RunAndReturn(run func(context.Context, uint64) (*synchronizer.L1Block, error)) *SynchronizerInterfaceMock_GetL1BlockByNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoRootPerIndex provides a mock function with given fields: ctx, L1InfoTreeIndex +func (_m *SynchronizerInterfaceMock) GetL1InfoRootPerIndex(ctx context.Context, L1InfoTreeIndex uint32) (common.Hash, error) { + ret := _m.Called(ctx, L1InfoTreeIndex) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoRootPerIndex") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint32) (common.Hash, error)); ok { + return rf(ctx, L1InfoTreeIndex) + } + if rf, ok := ret.Get(0).(func(context.Context, uint32) common.Hash); ok { + r0 = rf(ctx, L1InfoTreeIndex) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint32) error); ok { + r1 = rf(ctx, L1InfoTreeIndex) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoRootPerIndex' +type SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call struct { + *mock.Call +} + +// GetL1InfoRootPerIndex is a helper method 
to define mock.On call +// - ctx context.Context +// - L1InfoTreeIndex uint32 +func (_e *SynchronizerInterfaceMock_Expecter) GetL1InfoRootPerIndex(ctx interface{}, L1InfoTreeIndex interface{}) *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call { + return &SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call{Call: _e.mock.On("GetL1InfoRootPerIndex", ctx, L1InfoTreeIndex)} +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call) Run(run func(ctx context.Context, L1InfoTreeIndex uint32)) *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint32)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call) Return(_a0 common.Hash, _a1 error) *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call) RunAndReturn(run func(context.Context, uint32) (common.Hash, error)) *SynchronizerInterfaceMock_GetL1InfoRootPerIndex_Call { + _c.Call.Return(run) + return _c +} + +// GetL1InfoTreeLeaves provides a mock function with given fields: ctx, indexLeaves +func (_m *SynchronizerInterfaceMock) GetL1InfoTreeLeaves(ctx context.Context, indexLeaves []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, indexLeaves) + + if len(ret) == 0 { + panic("no return value specified for GetL1InfoTreeLeaves") + } + + var r0 map[uint32]synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, indexLeaves) + } + if rf, ok := ret.Get(0).(func(context.Context, []uint32) map[uint32]synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, indexLeaves) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[uint32]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []uint32) error); ok { + r1 = 
rf(ctx, indexLeaves) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1InfoTreeLeaves' +type SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call struct { + *mock.Call +} + +// GetL1InfoTreeLeaves is a helper method to define mock.On call +// - ctx context.Context +// - indexLeaves []uint32 +func (_e *SynchronizerInterfaceMock_Expecter) GetL1InfoTreeLeaves(ctx interface{}, indexLeaves interface{}) *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call { + return &SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call{Call: _e.mock.On("GetL1InfoTreeLeaves", ctx, indexLeaves)} +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call) Run(run func(ctx context.Context, indexLeaves []uint32)) *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].([]uint32)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call) Return(_a0 map[uint32]synchronizer.L1InfoTreeLeaf, _a1 error) *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call) RunAndReturn(run func(context.Context, []uint32) (map[uint32]synchronizer.L1InfoTreeLeaf, error)) *SynchronizerInterfaceMock_GetL1InfoTreeLeaves_Call { + _c.Call.Return(run) + return _c +} + +// GetLastL1Block provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastL1Block(ctx context.Context) (*synchronizer.L1Block, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastL1Block") + } + + var r0 *synchronizer.L1Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*synchronizer.L1Block, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) 
*synchronizer.L1Block); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.L1Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetLastL1Block_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastL1Block' +type SynchronizerInterfaceMock_GetLastL1Block_Call struct { + *mock.Call +} + +// GetLastL1Block is a helper method to define mock.On call +// - ctx context.Context +func (_e *SynchronizerInterfaceMock_Expecter) GetLastL1Block(ctx interface{}) *SynchronizerInterfaceMock_GetLastL1Block_Call { + return &SynchronizerInterfaceMock_GetLastL1Block_Call{Call: _e.mock.On("GetLastL1Block", ctx)} +} + +func (_c *SynchronizerInterfaceMock_GetLastL1Block_Call) Run(run func(ctx context.Context)) *SynchronizerInterfaceMock_GetLastL1Block_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLastL1Block_Call) Return(_a0 *synchronizer.L1Block, _a1 error) *SynchronizerInterfaceMock_GetLastL1Block_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLastL1Block_Call) RunAndReturn(run func(context.Context) (*synchronizer.L1Block, error)) *SynchronizerInterfaceMock_GetLastL1Block_Call { + _c.Call.Return(run) + return _c +} + +// GetLastestVirtualBatchNumber provides a mock function with given fields: ctx +func (_m *SynchronizerInterfaceMock) GetLastestVirtualBatchNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetLastestVirtualBatchNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = 
ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLastestVirtualBatchNumber' +type SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call struct { + *mock.Call +} + +// GetLastestVirtualBatchNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *SynchronizerInterfaceMock_Expecter) GetLastestVirtualBatchNumber(ctx interface{}) *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call { + return &SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call{Call: _e.mock.On("GetLastestVirtualBatchNumber", ctx)} +} + +func (_c *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call) Run(run func(ctx context.Context)) *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call) Return(_a0 uint64, _a1 error) *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *SynchronizerInterfaceMock_GetLastestVirtualBatchNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetLeafsByL1InfoRoot provides a mock function with given fields: ctx, l1InfoRoot +func (_m *SynchronizerInterfaceMock) GetLeafsByL1InfoRoot(ctx context.Context, l1InfoRoot common.Hash) ([]synchronizer.L1InfoTreeLeaf, error) { + ret := _m.Called(ctx, l1InfoRoot) + + if len(ret) == 0 { + panic("no return value specified for GetLeafsByL1InfoRoot") + } + + var r0 []synchronizer.L1InfoTreeLeaf + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
common.Hash) ([]synchronizer.L1InfoTreeLeaf, error)); ok { + return rf(ctx, l1InfoRoot) + } + if rf, ok := ret.Get(0).(func(context.Context, common.Hash) []synchronizer.L1InfoTreeLeaf); ok { + r0 = rf(ctx, l1InfoRoot) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]synchronizer.L1InfoTreeLeaf) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { + r1 = rf(ctx, l1InfoRoot) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetLeafsByL1InfoRoot' +type SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call struct { + *mock.Call +} + +// GetLeafsByL1InfoRoot is a helper method to define mock.On call +// - ctx context.Context +// - l1InfoRoot common.Hash +func (_e *SynchronizerInterfaceMock_Expecter) GetLeafsByL1InfoRoot(ctx interface{}, l1InfoRoot interface{}) *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call { + return &SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call{Call: _e.mock.On("GetLeafsByL1InfoRoot", ctx, l1InfoRoot)} +} + +func (_c *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call) Run(run func(ctx context.Context, l1InfoRoot common.Hash)) *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(common.Hash)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call) Return(_a0 []synchronizer.L1InfoTreeLeaf, _a1 error) *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call) RunAndReturn(run func(context.Context, common.Hash) ([]synchronizer.L1InfoTreeLeaf, error)) *SynchronizerInterfaceMock_GetLeafsByL1InfoRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetSequenceByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m 
*SynchronizerInterfaceMock) GetSequenceByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.SequencedBatches, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetSequenceByBatchNumber") + } + + var r0 *synchronizer.SequencedBatches + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.SequencedBatches, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.SequencedBatches); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.SequencedBatches) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSequenceByBatchNumber' +type SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call struct { + *mock.Call +} + +// GetSequenceByBatchNumber is a helper method to define mock.On call +// - ctx context.Context +// - batchNumber uint64 +func (_e *SynchronizerInterfaceMock_Expecter) GetSequenceByBatchNumber(ctx interface{}, batchNumber interface{}) *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call { + return &SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call{Call: _e.mock.On("GetSequenceByBatchNumber", ctx, batchNumber)} +} + +func (_c *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call) Run(run func(ctx context.Context, batchNumber uint64)) *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call) Return(_a0 *synchronizer.SequencedBatches, _a1 error) 
*SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call) RunAndReturn(run func(context.Context, uint64) (*synchronizer.SequencedBatches, error)) *SynchronizerInterfaceMock_GetSequenceByBatchNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetVirtualBatchByBatchNumber provides a mock function with given fields: ctx, batchNumber +func (_m *SynchronizerInterfaceMock) GetVirtualBatchByBatchNumber(ctx context.Context, batchNumber uint64) (*synchronizer.VirtualBatch, error) { + ret := _m.Called(ctx, batchNumber) + + if len(ret) == 0 { + panic("no return value specified for GetVirtualBatchByBatchNumber") + } + + var r0 *synchronizer.VirtualBatch + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (*synchronizer.VirtualBatch, error)); ok { + return rf(ctx, batchNumber) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) *synchronizer.VirtualBatch); ok { + r0 = rf(ctx, batchNumber) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*synchronizer.VirtualBatch) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, batchNumber) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetVirtualBatchByBatchNumber' +type SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call struct { + *mock.Call +} + +// GetVirtualBatchByBatchNumber is a helper method to define mock.On call +// - ctx context.Context +// - batchNumber uint64 +func (_e *SynchronizerInterfaceMock_Expecter) GetVirtualBatchByBatchNumber(ctx interface{}, batchNumber interface{}) *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call { + return &SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call{Call: _e.mock.On("GetVirtualBatchByBatchNumber", ctx, 
batchNumber)} +} + +func (_c *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call) Run(run func(ctx context.Context, batchNumber uint64)) *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call) Return(_a0 *synchronizer.VirtualBatch, _a1 error) *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call) RunAndReturn(run func(context.Context, uint64) (*synchronizer.VirtualBatch, error)) *SynchronizerInterfaceMock_GetVirtualBatchByBatchNumber_Call { + _c.Call.Return(run) + return _c +} + +// IsSynced provides a mock function with no fields +func (_m *SynchronizerInterfaceMock) IsSynced() bool { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for IsSynced") + } + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// SynchronizerInterfaceMock_IsSynced_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSynced' +type SynchronizerInterfaceMock_IsSynced_Call struct { + *mock.Call +} + +// IsSynced is a helper method to define mock.On call +func (_e *SynchronizerInterfaceMock_Expecter) IsSynced() *SynchronizerInterfaceMock_IsSynced_Call { + return &SynchronizerInterfaceMock_IsSynced_Call{Call: _e.mock.On("IsSynced")} +} + +func (_c *SynchronizerInterfaceMock_IsSynced_Call) Run(run func()) *SynchronizerInterfaceMock_IsSynced_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_IsSynced_Call) Return(_a0 bool) *SynchronizerInterfaceMock_IsSynced_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *SynchronizerInterfaceMock_IsSynced_Call) 
RunAndReturn(run func() bool) *SynchronizerInterfaceMock_IsSynced_Call { + _c.Call.Return(run) + return _c +} + +// SetCallbackOnReorgDone provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnReorgDone(callback func(synchronizer.ReorgExecutionResult)) { + _m.Called(callback) +} + +// SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCallbackOnReorgDone' +type SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call struct { + *mock.Call +} + +// SetCallbackOnReorgDone is a helper method to define mock.On call +// - callback func(synchronizer.ReorgExecutionResult) +func (_e *SynchronizerInterfaceMock_Expecter) SetCallbackOnReorgDone(callback interface{}) *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call { + return &SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call{Call: _e.mock.On("SetCallbackOnReorgDone", callback)} +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call) Run(run func(callback func(synchronizer.ReorgExecutionResult))) *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(synchronizer.ReorgExecutionResult))) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call) Return() *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call { + _c.Call.Return() + return _c +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call) RunAndReturn(run func(func(synchronizer.ReorgExecutionResult))) *SynchronizerInterfaceMock_SetCallbackOnReorgDone_Call { + _c.Run(run) + return _c +} + +// SetCallbackOnRollbackBatches provides a mock function with given fields: callback +func (_m *SynchronizerInterfaceMock) SetCallbackOnRollbackBatches(callback func(synchronizer.RollbackBatchesData)) { + _m.Called(callback) +} + +// SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'SetCallbackOnRollbackBatches' +type SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call struct { + *mock.Call +} + +// SetCallbackOnRollbackBatches is a helper method to define mock.On call +// - callback func(synchronizer.RollbackBatchesData) +func (_e *SynchronizerInterfaceMock_Expecter) SetCallbackOnRollbackBatches(callback interface{}) *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call { + return &SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call{Call: _e.mock.On("SetCallbackOnRollbackBatches", callback)} +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call) Run(run func(callback func(synchronizer.RollbackBatchesData))) *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(func(synchronizer.RollbackBatchesData))) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call) Return() *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call { + _c.Call.Return() + return _c +} + +func (_c *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call) RunAndReturn(run func(func(synchronizer.RollbackBatchesData))) *SynchronizerInterfaceMock_SetCallbackOnRollbackBatches_Call { + _c.Run(run) + return _c +} + +// Stop provides a mock function with no fields +func (_m *SynchronizerInterfaceMock) Stop() { + _m.Called() +} + +// SynchronizerInterfaceMock_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop' +type SynchronizerInterfaceMock_Stop_Call struct { + *mock.Call +} + +// Stop is a helper method to define mock.On call +func (_e *SynchronizerInterfaceMock_Expecter) Stop() *SynchronizerInterfaceMock_Stop_Call { + return &SynchronizerInterfaceMock_Stop_Call{Call: _e.mock.On("Stop")} +} + +func (_c *SynchronizerInterfaceMock_Stop_Call) Run(run func()) *SynchronizerInterfaceMock_Stop_Call { + 
_c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_Stop_Call) Return() *SynchronizerInterfaceMock_Stop_Call { + _c.Call.Return() + return _c +} + +func (_c *SynchronizerInterfaceMock_Stop_Call) RunAndReturn(run func()) *SynchronizerInterfaceMock_Stop_Call { + _c.Run(run) + return _c +} + +// Sync provides a mock function with given fields: returnOnSync +func (_m *SynchronizerInterfaceMock) Sync(returnOnSync bool) error { + ret := _m.Called(returnOnSync) + + if len(ret) == 0 { + panic("no return value specified for Sync") + } + + var r0 error + if rf, ok := ret.Get(0).(func(bool) error); ok { + r0 = rf(returnOnSync) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SynchronizerInterfaceMock_Sync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sync' +type SynchronizerInterfaceMock_Sync_Call struct { + *mock.Call +} + +// Sync is a helper method to define mock.On call +// - returnOnSync bool +func (_e *SynchronizerInterfaceMock_Expecter) Sync(returnOnSync interface{}) *SynchronizerInterfaceMock_Sync_Call { + return &SynchronizerInterfaceMock_Sync_Call{Call: _e.mock.On("Sync", returnOnSync)} +} + +func (_c *SynchronizerInterfaceMock_Sync_Call) Run(run func(returnOnSync bool)) *SynchronizerInterfaceMock_Sync_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(bool)) + }) + return _c +} + +func (_c *SynchronizerInterfaceMock_Sync_Call) Return(_a0 error) *SynchronizerInterfaceMock_Sync_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *SynchronizerInterfaceMock_Sync_Call) RunAndReturn(run func(bool) error) *SynchronizerInterfaceMock_Sync_Call { + _c.Call.Return(run) + return _c +} + +// NewSynchronizerInterfaceMock creates a new instance of SynchronizerInterfaceMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewSynchronizerInterfaceMock(t interface { + mock.TestingT + Cleanup(func()) +}) *SynchronizerInterfaceMock { + mock := &SynchronizerInterfaceMock{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/test/bats/fep/access-list-e2e.bats b/test/bats/access-list-e2e.bats similarity index 98% rename from test/bats/fep/access-list-e2e.bats rename to test/bats/access-list-e2e.bats index cc621c109..bf8c7533e 100644 --- a/test/bats/fep/access-list-e2e.bats +++ b/test/bats/access-list-e2e.bats @@ -1,6 +1,6 @@ setup() { - load '../helpers/common-setup' - load '../helpers/common' + load 'helpers/common-setup' + load 'helpers/common' _common_setup diff --git a/test/bats/fep/basic-e2e.bats b/test/bats/basic-e2e.bats similarity index 99% rename from test/bats/fep/basic-e2e.bats rename to test/bats/basic-e2e.bats index d977f4bcb..0fbb7f40b 100644 --- a/test/bats/fep/basic-e2e.bats +++ b/test/bats/basic-e2e.bats @@ -1,6 +1,6 @@ setup() { - load '../helpers/common-setup' - load '../helpers/common' + load 'helpers/common-setup' + load 'helpers/common' _common_setup diff --git a/test/bats/fep/bridge-e2e.bats b/test/bats/bridge-e2e.bats similarity index 98% rename from test/bats/fep/bridge-e2e.bats rename to test/bats/bridge-e2e.bats index d92032979..b632832e1 100644 --- a/test/bats/fep/bridge-e2e.bats +++ b/test/bats/bridge-e2e.bats @@ -1,7 +1,7 @@ setup() { - load '../helpers/common-setup' - load '../helpers/common' - load '../helpers/lxly-bridge' + load 'helpers/common-setup' + load 'helpers/common' + load 'helpers/lxly-bridge' _common_setup diff --git a/test/bats/fep/e2e.bats b/test/bats/e2e.bats similarity index 57% rename from test/bats/fep/e2e.bats rename to test/bats/e2e.bats index a468e7aa3..e53bba1fb 100644 --- a/test/bats/fep/e2e.bats +++ b/test/bats/e2e.bats @@ -1,11 +1,11 @@ setup() { - load '../helpers/common-setup' + load 'helpers/common-setup' _common_setup } @test "Verify batches" { echo "Waiting 10 minutes to 
get some verified batch...." - run $PROJECT_ROOT/../scripts/batch_verification_monitor.sh 0 600 + run $PROJECT_ROOT/scripts/batch_verification_monitor.sh 0 600 assert_success } diff --git a/test/bats/helpers/aggsender.bash b/test/bats/helpers/aggsender.bash deleted file mode 100644 index 6a7399a94..000000000 --- a/test/bats/helpers/aggsender.bash +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -function wait_to_settled_certificate_containing_global_index(){ - local _l2_pp1_cdk_node_url=$1 - local _global_index=$2 - local _check_frequency=${3:-30} - local _timeout=${4:-300} - echo "... waiting for certificate with global index $_global_index" >&3 - run_with_timeout "settle cert for $_global_index" $_check_frequency $_timeout $aggsender_find_imported_bridge $_l2_pp1_cdk_node_url $_global_index -} \ No newline at end of file diff --git a/test/bats/helpers/common-multi_cdk-setup.bash b/test/bats/helpers/common-multi_cdk-setup.bash deleted file mode 100644 index 2758c9f75..000000000 --- a/test/bats/helpers/common-multi_cdk-setup.bash +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash - -_common_multi_setup() { - load '../helpers/common-setup' - _common_setup - # generated with cast wallet new - readonly target_address=0xbecE3a31343c6019CDE0D5a4dF2AF8Df17ebcB0f - readonly target_private_key=0x51caa196504216b1730280feb63ddd8c5ae194d13e57e58d559f1f1dc3eda7c9 - - kurtosis service exec $enclave contracts-001 "cat /opt/zkevm/combined-001.json" | tail -n +2 | jq '.' > combined-001.json - kurtosis service exec $enclave contracts-002 "cat /opt/zkevm/combined-002.json" | tail -n +2 | jq '.' 
> combined-002.json - kurtosis service exec $enclave contracts-002 "cat /opt/zkevm-contracts/deployment/v2/create_rollup_parameters.json" | tail -n +2 | jq -r '.gasTokenAddress' > gas-token-address.json - - readonly private_key="0x12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625" - readonly eth_address=$(cast wallet address --private-key $private_key) - readonly l1_rpc_url=http://$(kurtosis port print $enclave el-1-geth-lighthouse rpc) - readonly l2_pp1_url=$(kurtosis port print $enclave cdk-erigon-rpc-001 rpc) - readonly l2_pp2_url=$(kurtosis port print $enclave cdk-erigon-rpc-002 rpc) - readonly bridge_address=$(cat combined-001.json | jq -r .polygonZkEVMBridgeAddress) - readonly pol_address=$(cat combined-001.json | jq -r .polTokenAddress) - readonly gas_token_address=$(&3 - echo "=== POL address=$pol_address ===" >&3 - echo "=== Gas token address=$gas_token_address ===" >&3 - echo "=== L1 network id=$l1_rpc_network_id ===" >&3 - echo "=== L2 PP1 network id=$l2_pp1b_network_id ===" >&3 - echo "=== L2 PP2 network id=$l2_pp2b_network_id ===" >&3 - echo "=== L1 RPC URL=$l1_rpc_url ===" >&3 - echo "=== L2 PP1 URL=$l2_pp1_url ===" >&3 - echo "=== L2 PP2 URL=$l2_pp2_url ===" >&3 - echo "=== L2 PP1B URL=$l2_pp1b_url ===" >&3 - echo "=== L2 PP2B URL=$l2_pp2b_url ===" >&3 - -} - -add_cdk_network2_to_agglayer(){ - echo "=== Checking if network 2 is in agglayer ===" >&3 - local _prev=$(kurtosis service exec $enclave agglayer "grep \"2 = \" /etc/zkevm/agglayer-config.toml || true" | tail -n +2) - if [ ! 
-z "$_prev" ]; then - echo "Network 2 already added to agglayer" >&3 - return - fi - echo "=== Adding network 2 to agglayer === ($_prev)" >&3 - kurtosis service exec $enclave agglayer "sed -i 's/\[proof\-signers\]/2 = \"http:\/\/cdk-erigon-rpc-002:8123\"\n\[proof-signers\]/i' /etc/zkevm/agglayer-config.toml" - kurtosis service stop $enclave agglayer - kurtosis service start $enclave agglayer -} - -fund_claim_tx_manager(){ - echo "=== Funding bridge auto-claim ===" >&3 - cast send --legacy --value 100ether --rpc-url $l2_pp1_url --private-key $private_key 0x5f5dB0D4D58310F53713eF4Df80ba6717868A9f8 - cast send --legacy --value 100ether --rpc-url $l2_pp2_url --private-key $private_key 0x93F63c24735f45Cd0266E87353071B64dd86bc05 -} - - -mint_pol_token(){ - echo "=== Minting POL ===" >&3 - cast send \ - --rpc-url $l1_rpc_url \ - --private-key $private_key \ - $pol_address \ - "$mint_fn_sig" \ - $eth_address 10000000000000000000000 - # Allow bridge to spend it - cast send \ - --rpc-url $l1_rpc_url \ - --private-key $private_key \ - $pol_address \ - "$approve_fn_sig" \ - $bridge_address 10000000000000000000000 -} diff --git a/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats b/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats deleted file mode 100644 index a9a904b6f..000000000 --- a/test/bats/pp-multi/bridge-l2_to_l2-e2e.bats +++ /dev/null @@ -1,82 +0,0 @@ -# based on: https://github.com/0xPolygon/kurtosis-cdk/blob/jhilliard/multi-pp-testing/multi-pp-test.sh.md - -setup() { - load '../helpers/common-multi_cdk-setup' - _common_multi_setup - load '../helpers/common' - load '../helpers/lxly-bridge' - load '../helpers/aggsender' - - if [ ! 
-f $aggsender_find_imported_bridge ]; then - echo "missing required tool: $aggsender_find_imported_bridge" >&3 - return 1 - fi - - add_cdk_network2_to_agglayer - fund_claim_tx_manager - mint_pol_token - - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - native_token_addr="0x0000000000000000000000000000000000000000" - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" - # Params for lxly-bridge functions - is_forced=${IS_FORCED:-"true"} - bridge_addr=$bridge_address - meta_bytes=${META_BYTES:-"0x1234"} - destination_addr=$target_address - timeout="600" - claim_frequency="30" - - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") -} - -@test "Test L2 to L2 bridge" { - echo "=== Running LxLy bridge eth L1 to L2(PP1) amount:$amount" >&3 - destination_net=$l2_pp1b_network_id - bridge_asset "$native_token_addr" "$l1_rpc_url" - bridge_tx_hash_pp1=$bridge_tx_hash - - echo "=== Running LxLy bridge eth L1 to L2(PP2) amount:$amount" >&3 - destination_net=$l2_pp2b_network_id - bridge_asset "$native_token_addr" "$l1_rpc_url" - bridge_tx_hash_pp2=$bridge_tx_hash - - echo "=== Running LxLy claim L1 to L2(PP1) for $bridge_tx_hash_pp1" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash_pp1" "$destination_addr" "$l2_pp1_url" "$l2_pp1b_url" - assert_success - - echo "=== Running LxLy claim L1 to L2(PP2) for $bridge_tx_hash_pp2" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash_pp2" "$destination_addr" "$l2_pp2_url" "$l2_pp2b_url" - assert_success - - - # reduce eth amount - amount=1234567 - echo "=== Running LxLy bridge L2(PP2) to L2(PP1) amount:$amount" >&3 - destination_net=$l2_pp1b_network_id - meta_bytes="0xbeef" - bridge_asset "$native_token_addr" "$l2_pp2_url" - - echo "=== Running LxLy claim L2(PP2) to L2(PP1) for: $bridge_tx_hash" >&3 - claim_tx_hash "$timeout" "$bridge_tx_hash" 
"$destination_addr" "$l2_pp1_url" "$l2_pp2b_url" - echo "... deposit [$global_index]" - global_index_pp2_to_pp1="$global_index" - - # Now we need to do a bridge on L2(PP1) to trigger a certificate: - ether_value=${ETHER_VALUE:-"0.0100000054"} - amount=$(cast to-wei $ether_value ether) - echo "=== Running LxLy bridge eth L2(PP1) to L1 (trigger a certificate on PP1) amount:$amount" >&3 - destination_net=$l1_rpc_network_id - meta_bytes="0xabcd" - bridge_asset "$native_token_addr" "$l2_pp1_url" - - echo "=== Running LxLy claim L2(PP1) to L1 for $bridge_tx_hash" >&3 - run claim_tx_hash "$timeout" "$bridge_tx_hash" "$destination_addr" "$l1_rpc_url" "$l2_pp1b_url" - assert_success - - echo "=== Waiting to settled certificate with imported bridge for global_index: $global_index_pp2_to_pp1" - wait_to_settled_certificate_containing_global_index $l2_pp1_cdk_node_url $global_index_pp2_to_pp1 - -} diff --git a/test/bats/pp/bridge-e2e-msg.bats b/test/bats/pp/bridge-e2e-msg.bats deleted file mode 100644 index b55259156..000000000 --- a/test/bats/pp/bridge-e2e-msg.bats +++ /dev/null @@ -1,68 +0,0 @@ -setup() { - load '../../helpers/common-setup' - _common_setup - load '../../helpers/common' - load '../../helpers/lxly-bridge-test' - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 - - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key 
$sender_private_key)" - destination_net=${DESTINATION_NET:-"1"} - destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} - if [[ -n "$GAS_TOKEN_ADDR" ]]; then - echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 - gas_token_addr="$GAS_TOKEN_ADDR" - else - echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." >&3 - readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json - run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" - assert_success - assert_output --regexp "0x[a-fA-F0-9]{40}" - gas_token_addr=$output - fi - readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=$BRIDGE_ADDRESS - readonly meta_bytes=${META_BYTES:-"0x1234"} - - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} - - readonly dry_run=${DRY_RUN:-"false"} - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") - readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) -} - - -@test "transfer message" { - echo "====== bridgeMessage L1 -> L2" >&3 - destination_addr=$sender_addr - destination_net=$l2_rpc_network_id - run bridge_message "$native_token_addr" "$l1_rpc_url" - assert_success - - echo "====== Claim in L2" >&3 - timeout="120" - claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" "bridgeMessage" - assert_success - - echo "====== 
bridgeMessage L2->L1" >&3 - destination_net=0 - run bridge_message "$destination_addr" "$l2_rpc_url" - assert_success -} \ No newline at end of file diff --git a/test/bats/pp/bridge-e2e.bats b/test/bats/pp/bridge-e2e.bats deleted file mode 100644 index 1f358315b..000000000 --- a/test/bats/pp/bridge-e2e.bats +++ /dev/null @@ -1,73 +0,0 @@ -setup() { - load '../helpers/common-setup' - load '../helpers/common' - load '../helpers/lxly-bridge' - - _common_setup - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 - - readonly sender_private_key=${SENDER_PRIVATE_KEY:-"12d7de8621a77640c9241b2595ba78ce443d05e94090365ab3bb5e19df82c625"} - readonly sender_addr="$(cast wallet address --private-key $sender_private_key)" - destination_net=${DESTINATION_NET:-"1"} - destination_addr=${DESTINATION_ADDRESS:-"0x0bb7AA0b4FdC2D2862c088424260e99ed6299148"} - ether_value=${ETHER_VALUE:-"0.0200000054"} - amount=$(cast to-wei $ether_value ether) - readonly native_token_addr=${NATIVE_TOKEN_ADDRESS:-"0x0000000000000000000000000000000000000000"} - if [[ -n "$GAS_TOKEN_ADDR" ]]; then - echo "Using provided GAS_TOKEN_ADDR: $GAS_TOKEN_ADDR" >&3 - gas_token_addr="$GAS_TOKEN_ADDR" - else - echo "GAS_TOKEN_ADDR not provided, retrieving from rollup parameters file." 
>&3 - readonly rollup_params_file=/opt/zkevm/create_rollup_parameters.json - run bash -c "$contracts_service_wrapper 'cat $rollup_params_file' | tail -n +2 | jq -r '.gasTokenAddress'" - assert_success - assert_output --regexp "0x[a-fA-F0-9]{40}" - gas_token_addr=$output - fi - readonly is_forced=${IS_FORCED:-"true"} - readonly bridge_addr=$BRIDGE_ADDRESS - readonly meta_bytes=${META_BYTES:-"0x1234"} - - readonly l1_rpc_url=${L1_ETH_RPC_URL:-"$(kurtosis port print $enclave el-1-geth-lighthouse rpc)"} - readonly bridge_api_url=${BRIDGE_API_URL:-"$(kurtosis port print $enclave zkevm-bridge-service-001 rpc)"} - - readonly dry_run=${DRY_RUN:-"false"} - readonly l1_rpc_network_id=$(cast call --rpc-url $l1_rpc_url $bridge_addr 'networkID() (uint32)') - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - gas_price=$(cast gas-price --rpc-url "$l2_rpc_url") - readonly weth_token_addr=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'WETHToken()' | cast parse-bytes32-address) -} - -@test "Native gas token deposit to WETH" { - destination_addr=$sender_addr - local initial_receiver_balance=$(cast call --rpc-url "$l2_rpc_url" "$weth_token_addr" "$balance_of_fn_sig" "$destination_addr" | awk '{print $1}') - echo "Initial receiver balance of native token on L2 $initial_receiver_balance" >&3 - - echo "=== Running LxLy deposit on L1 to network: $l2_rpc_network_id native_token: $native_token_addr" >&3 - - destination_net=$l2_rpc_network_id - run bridge_asset "$native_token_addr" "$l1_rpc_url" - assert_success - - echo "=== Running LxLy claim on L2" >&3 - timeout="120" - claim_frequency="10" - run wait_for_claim "$timeout" "$claim_frequency" "$l2_rpc_url" - assert_success - - echo "=== bridgeAsset L2 WETH: $weth_token_addr to L1 ETH" >&3 - destination_addr=$sender_addr - destination_net=0 - run bridge_asset "$weth_token_addr" "$l2_rpc_url" - assert_success -} diff --git a/test/bats/pp/e2e-pp.bats b/test/bats/pp/e2e-pp.bats deleted file 
mode 100644 index 4ef831e7b..000000000 --- a/test/bats/pp/e2e-pp.bats +++ /dev/null @@ -1,26 +0,0 @@ -setup() { - load '../helpers/common-setup' - - _common_setup - - if [ -z "$BRIDGE_ADDRESS" ]; then - local combined_json_file="/opt/zkevm/combined.json" - echo "BRIDGE_ADDRESS env variable is not provided, resolving the bridge address from the Kurtosis CDK '$combined_json_file'" >&3 - - # Fetching the combined JSON output and filtering to get polygonZkEVMBridgeAddress - combined_json_output=$($contracts_service_wrapper "cat $combined_json_file" | tail -n +2) - bridge_default_address=$(echo "$combined_json_output" | jq -r .polygonZkEVMBridgeAddress) - BRIDGE_ADDRESS=$bridge_default_address - fi - echo "Bridge address=$BRIDGE_ADDRESS" >&3 -} - -@test "Verify certificate settlement" { - echo "Waiting 10 minutes to get some settle certificate...." >&3 - - readonly bridge_addr=$BRIDGE_ADDRESS - readonly l2_rpc_network_id=$(cast call --rpc-url $l2_rpc_url $bridge_addr 'networkID() (uint32)') - - run $PROJECT_ROOT/../scripts/agglayer_certificates_monitor.sh 1 600 $l2_rpc_network_id - assert_success -} diff --git a/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml b/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml deleted file mode 100644 index 2b02f6023..000000000 --- a/test/combinations/fork12-pessimistic-multi-attach-second-cdk.yml +++ /dev/null @@ -1,40 +0,0 @@ -deployment_stages: - deploy_l1: false - deploy_agglayer: false - -args: - deployment_suffix: "-002" - zkevm_rollup_chain_id: 20202 - zkevm_rollup_id: 2 - - # The following accounts have been generated using the following command: - # polycli wallet inspect --mnemonic 'bless share truly shadow primary sun relief border van gallery stairs edit reflect gentle athlete main device smile response rescue mirror floor say people' --addresses 9 | tee keys.txt | jq -r '.Addresses[] | [.ETHAddress, .HexPrivateKey] | @tsv' | awk 
'BEGIN{split("sequencer,aggregator,claimtxmanager,timelock,admin,loadtest,agglayer,dac,proofsigner",roles,",")} {print "zkevm_l2_" roles[NR] "_address: \"" $1 "\""; print "zkevm_l2_" roles[NR] "_private_key: \"0x" $2 "\"\n"}' - # Note that admin and agglayer accounts have been removed since we're using the default accounts. - zkevm_l2_sequencer_address: "0xA670342930242407b9984e467353044f8472055e" - zkevm_l2_sequencer_private_key: "0x902ed4ce26b536617a4f26da5e0cd0ef61b514a076b4bd766d6ab8b97efbb8c1" - zkevm_l2_aggregator_address: "0xfC419a9d9Fe0DfA4Cf9971AcD1Fbcd356DD768FD" - zkevm_l2_aggregator_private_key: "0xa70db9fb4b84a6ba18c03cd2266116dd110538d6c4c88e67ca35a29b910da25d" - zkevm_l2_claimtxmanager_address: "0x93F63c24735f45Cd0266E87353071B64dd86bc05" - zkevm_l2_claimtxmanager_private_key: "0x38718f22097afba13be48d818964326c9c5c48133f51e3c3bfd6faf05f813b34" - zkevm_l2_timelock_address: "0xDB22C6f61A82d6AA6d3607289fC93774AC09413a" - zkevm_l2_timelock_private_key: "0xae4a69010583a09709baa563fa66f9e6f2dacf9e9c84b89932406b9a0521b561" - zkevm_l2_loadtest_address: "0xD5278fC3Dc72A226d5C04c3d2C85fd397A46fA08" - zkevm_l2_loadtest_private_key: "0xef4db4f97684b8307adc332ed6c1bc82d66d160f08e7427d082d66a23889625e" - zkevm_l2_dac_address: "0xDa07AAD7226B136bc24157Dc4Ff5A813490E20D0" - zkevm_l2_dac_private_key: "0x992c9ab11d5eab6b6c2634b8bb0b85f3d8d1acf25024dc99c359cb2afd9b40a7" - zkevm_l2_proofsigner_address: "0xf1a661D7b601Ec46a040f57193cC99aB8c4132FA" - zkevm_l2_proofsigner_private_key: "0xc7fe3a006d75ba9326d9792523385abb49057c66aee0b8b4248821a89713f975" - - - cdk_node_image: cdk:latest - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.61.2 - zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: false - zkevm_use_real_verifier: true - enable_normalcy: true - verifier_program_vkey: 
0x00766aa16a6efe4ac05c0fe21d4b50f9631dbd1a2663a982da861427085ea2ea - diff --git a/test/combinations/fork12-pessimistic-multi.yml b/test/combinations/fork12-pessimistic-multi.yml deleted file mode 100644 index 0b917569b..000000000 --- a/test/combinations/fork12-pessimistic-multi.yml +++ /dev/null @@ -1,17 +0,0 @@ -args: - cdk_node_image: cdk:latest - agglayer_image: ghcr.io/agglayer/agglayer:0.2.0-rc.20 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.61.2 - zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - zkevm_bridge_proxy_image: haproxy:3.1-bookworm - zkevm_bridge_service_image: hermeznetwork/zkevm-bridge-service:v0.6.0-RC5 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: false - zkevm_use_real_verifier: true - enable_normalcy: true - verifier_program_vkey: 0x00766aa16a6efe4ac05c0fe21d4b50f9631dbd1a2663a982da861427085ea2ea - agglayer_prover_sp1_key: {{.agglayer_prover_sp1_key}} - diff --git a/test/combinations/fork12-pessimistic.yml b/test/combinations/fork12-pessimistic.yml deleted file mode 100644 index 8e68a4b96..000000000 --- a/test/combinations/fork12-pessimistic.yml +++ /dev/null @@ -1,15 +0,0 @@ -args: - agglayer_image: ghcr.io/agglayer/agglayer:0.2.0-rc.20 - cdk_erigon_node_image: hermeznetwork/cdk-erigon:v2.61.2 - cdk_node_image: cdk - zkevm_bridge_proxy_image: haproxy:3.1-bookworm - zkevm_bridge_service_image: hermeznetwork/zkevm-bridge-service:v0.6.0-RC5 - zkevm_bridge_ui_image: leovct/zkevm-bridge-ui:multi-network - zkevm_contracts_image: leovct/zkevm-contracts:v9.0.0-rc.3-pp-fork.12-patch.1 - additional_services: [] - consensus_contract_type: pessimistic - sequencer_type: erigon - erigon_strict_mode: false - gas_token_enabled: true - agglayer_prover_sp1_key: {{.agglayer_prover_sp1_key}} - enable_normalcy: true diff --git a/test/config/kurtosis-cdk-node-config.toml.template b/test/config/kurtosis-cdk-node-config.toml.template 
index 45b938a2e..6729f7b71 100644 --- a/test/config/kurtosis-cdk-node-config.toml.template +++ b/test/config/kurtosis-cdk-node-config.toml.template @@ -53,8 +53,3 @@ Outputs = ["stderr"] VerifyProofInterval = "10s" GasOffset = 150000 SettlementBackend = "agglayer" - -[AggSender] -SaveCertificatesToFilesPath = "{{.zkevm_path_rw_data}}/" - - diff --git a/test/helpers/e2e.go b/test/helpers/e2e.go index 7cc03babf..dfc0ce5cb 100644 --- a/test/helpers/e2e.go +++ b/test/helpers/e2e.go @@ -10,15 +10,12 @@ import ( "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/globalexitrootmanagerl2sovereignchain" "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmbridgev2" "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmglobalexitrootv2" - "github.com/0xPolygon/cdk/aggoracle" - "github.com/0xPolygon/cdk/aggoracle/chaingersender" - "github.com/0xPolygon/cdk/bridgesync" - cfgTypes "github.com/0xPolygon/cdk/config/types" - "github.com/0xPolygon/cdk/etherman" - "github.com/0xPolygon/cdk/l1infotreesync" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/reorgdetector" "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" + "github.com/agglayer/aggkit/bridgesync" + aggkitTypes "github.com/agglayer/aggkit/config/types" + aggkitetherman "github.com/agglayer/aggkit/etherman" + "github.com/agglayer/aggkit/l1infotreesync" + "github.com/agglayer/aggkit/reorgdetector" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -34,13 +31,6 @@ const ( periodRetry = time.Millisecond * 100 ) -type AggoracleWithEVMChain struct { - L1Environment - L2Environment - AggOracle *aggoracle.AggOracle - NetworkIDL2 uint32 -} - // CommonEnvironment contains common setup results used in both L1 and L2 network setups. 
type CommonEnvironment struct { SimBackend *simulated.Backend @@ -59,39 +49,25 @@ type L1Environment struct { InfoTreeSync *l1infotreesync.L1InfoTreeSync } -// L2Environment contains setup results for L1 network. +// L2Environment contains setup results for L2 network. type L2Environment struct { CommonEnvironment GERContract *globalexitrootmanagerl2sovereignchain.Globalexitrootmanagerl2sovereignchain - AggoracleSender aggoracle.ChainSender EthTxManagerMock *EthTxManagerMock + NetworkID uint32 } -// NewE2EEnvWithEVML2 creates a new E2E environment with EVM L1 and L2 chains. -func NewE2EEnvWithEVML2(t *testing.T) *AggoracleWithEVMChain { +// NewL1EnvWithL2EVM creates a new E2E environment with EVM L1 and L2 chains. +func NewL1EnvWithL2EVM(t *testing.T) (*L1Environment, *L2Environment) { t.Helper() - ctx := context.Background() - // Setup L1 + // Setup L1 environment l1Setup := L1Setup(t) - // Setup L2 EVM - l2Setup := L2Setup(t) + // Setup L2 environment + l2Setup := L2Setup(t, rollupID) - oracle, err := aggoracle.New( - log.GetDefaultLogger(), l2Setup.AggoracleSender, - l1Setup.SimBackend.Client(), l1Setup.InfoTreeSync, - etherman.LatestBlock, time.Millisecond*20, //nolint:mnd - ) - require.NoError(t, err) - go oracle.Start(ctx) - - return &AggoracleWithEVMChain{ - L1Environment: *l1Setup, - L2Environment: *l2Setup, - AggOracle: oracle, - NetworkIDL2: rollupID, - } + return l1Setup, l2Setup } // L1Setup creates a new L1 environment. 
@@ -107,7 +83,7 @@ func L1Setup(t *testing.T) *L1Environment { dbPathReorgDetectorL1 := path.Join(t.TempDir(), "ReorgDetectorL1.sqlite") rdL1, err := reorgdetector.New(l1Client.Client(), reorgdetector.Config{ DBPath: dbPathReorgDetectorL1, - CheckReorgsInterval: cfgTypes.Duration{Duration: time.Millisecond * 100}, //nolint:mnd + CheckReorgsInterval: aggkitTypes.Duration{Duration: time.Millisecond * 100}, //nolint:mnd }, reorgdetector.L1) require.NoError(t, err) go rdL1.Start(ctx) //nolint:errcheck @@ -117,11 +93,11 @@ func L1Setup(t *testing.T) *L1Environment { l1InfoTreeSync, err := l1infotreesync.New( ctx, dbPathL1InfoTreeSync, gerL1Addr, common.Address{}, - syncBlockChunkSize, etherman.LatestBlock, + syncBlockChunkSize, aggkitetherman.LatestBlock, rdL1, l1Client.Client(), time.Millisecond, 0, periodRetry, retries, l1infotreesync.FlagAllowWrongContractsAddrs, - etherman.SafeBlock, + aggkitetherman.SafeBlock, ) require.NoError(t, err) @@ -141,9 +117,9 @@ func L1Setup(t *testing.T) *L1Environment { dbPathBridgeSyncL1 := path.Join(t.TempDir(), "BridgeSyncL1.sqlite") bridgeL1Sync, err := bridgesync.NewL1( ctx, dbPathBridgeSyncL1, bridgeL1Addr, - syncBlockChunks, etherman.LatestBlock, rdL1, testClient, + syncBlockChunks, aggkitetherman.LatestBlock, rdL1, testClient, initialBlock, waitForNewBlocksPeriod, retryPeriod, - retriesCount, originNetwork, false, etherman.SafeBlock) + retriesCount, originNetwork, false) require.NoError(t, err) go bridgeL1Sync.Start(ctx) @@ -164,7 +140,7 @@ func L1Setup(t *testing.T) *L1Environment { } // L2Setup creates a new L2 environment. 
-func L2Setup(t *testing.T) *L2Environment { +func L2Setup(t *testing.T, networkID uint32) *L2Environment { t.Helper() l2Client, authL2, gerL2Addr, gerL2Contract, @@ -172,19 +148,13 @@ func L2Setup(t *testing.T) *L2Environment { ethTxManagerMock := NewEthTxManMock(t, l2Client, authL2) - const gerCheckFrequency = time.Millisecond * 50 - sender, err := chaingersender.NewEVMChainGERSender( - log.GetDefaultLogger(), gerL2Addr, l2Client.Client(), - ethTxManagerMock, 0, gerCheckFrequency, - ) - require.NoError(t, err) ctx := context.Background() // Reorg detector dbPathReorgL2 := path.Join(t.TempDir(), "ReorgDetectorL2.sqlite") rdL2, err := reorgdetector.New(l2Client.Client(), reorgdetector.Config{ DBPath: dbPathReorgL2, - CheckReorgsInterval: cfgTypes.Duration{Duration: time.Millisecond * 100}}, //nolint:mnd + CheckReorgsInterval: aggkitTypes.Duration{Duration: time.Millisecond * 100}}, //nolint:mnd reorgdetector.L2, ) require.NoError(t, err) @@ -205,9 +175,9 @@ func L2Setup(t *testing.T) *L2Environment { bridgeL2Sync, err := bridgesync.NewL2( ctx, dbPathL2BridgeSync, bridgeL2Addr, syncBlockChunks, - etherman.LatestBlock, rdL2, testClient, + aggkitetherman.LatestBlock, rdL2, testClient, initialBlock, waitForNewBlocksPeriod, retryPeriod, - retriesCount, originNetwork, false, etherman.LatestBlock) + retriesCount, originNetwork, false) require.NoError(t, err) go bridgeL2Sync.Start(ctx) @@ -223,8 +193,8 @@ func L2Setup(t *testing.T) *L2Environment { BridgeSync: bridgeL2Sync, }, GERContract: gerL2Contract, - AggoracleSender: sender, EthTxManagerMock: ethTxManagerMock, + NetworkID: networkID, } } diff --git a/test/helpers/ethtxmanmock_e2e.go b/test/helpers/ethtxmanmock_e2e.go index 1a42ea17c..f57476de5 100644 --- a/test/helpers/ethtxmanmock_e2e.go +++ b/test/helpers/ethtxmanmock_e2e.go @@ -7,8 +7,8 @@ import ( big "math/big" "testing" - "github.com/0xPolygon/cdk/log" ethtxtypes "github.com/0xPolygon/zkevm-ethtx-manager/types" + "github.com/agglayer/aggkit/log" 
"github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" @@ -73,9 +73,11 @@ func NewEthTxManMock( return } }). - Return(common.Hash{}, nil) + Return(common.Hash{}, nil). + Maybe() ethTxMock.On("Result", mock.Anything, mock.Anything). - Return(ethtxtypes.MonitoredTxResult{Status: ethtxtypes.MonitoredTxStatusMined}, nil) + Return(ethtxtypes.MonitoredTxResult{Status: ethtxtypes.MonitoredTxStatusMined}, nil). + Maybe() return ethTxMock } diff --git a/test/helpers/mock_ethtxmanager.go b/test/helpers/mock_eth_tx_manager.go similarity index 61% rename from test/helpers/mock_ethtxmanager.go rename to test/helpers/mock_eth_tx_manager.go index ac10be2a8..2320a1436 100644 --- a/test/helpers/mock_ethtxmanager.go +++ b/test/helpers/mock_eth_tx_manager.go @@ -1,14 +1,13 @@ -// Code generated by mockery. DO NOT EDIT. +// Code generated by mockery v2.52.2. DO NOT EDIT. package helpers import ( + context "context" big "math/big" common "github.com/ethereum/go-ethereum/common" - context "context" - mock "github.com/stretchr/testify/mock" types "github.com/ethereum/go-ethereum/core/types" @@ -92,9 +91,73 @@ func (_c *EthTxManagerMock_Add_Call) RunAndReturn(run func(context.Context, *com return _c } -// Remove provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { - ret := _m.Called(ctx, id) +// AddWithGas provides a mock function with given fields: ctx, to, value, data, gasOffset, sidecar, gas +func (_m *EthTxManagerMock) AddWithGas(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64) (common.Hash, error) { + ret := _m.Called(ctx, to, value, data, gasOffset, sidecar, gas) + + if len(ret) == 0 { + panic("no return value specified for AddWithGas") + } + + var r0 common.Hash + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, 
[]byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)); ok { + return rf(ctx, to, value, data, gasOffset, sidecar, gas) + } + if rf, ok := ret.Get(0).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) common.Hash); ok { + r0 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(common.Hash) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) error); ok { + r1 = rf(ctx, to, value, data, gasOffset, sidecar, gas) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthTxManagerMock_AddWithGas_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddWithGas' +type EthTxManagerMock_AddWithGas_Call struct { + *mock.Call +} + +// AddWithGas is a helper method to define mock.On call +// - ctx context.Context +// - to *common.Address +// - value *big.Int +// - data []byte +// - gasOffset uint64 +// - sidecar *types.BlobTxSidecar +// - gas uint64 +func (_e *EthTxManagerMock_Expecter) AddWithGas(ctx interface{}, to interface{}, value interface{}, data interface{}, gasOffset interface{}, sidecar interface{}, gas interface{}) *EthTxManagerMock_AddWithGas_Call { + return &EthTxManagerMock_AddWithGas_Call{Call: _e.mock.On("AddWithGas", ctx, to, value, data, gasOffset, sidecar, gas)} +} + +func (_c *EthTxManagerMock_AddWithGas_Call) Run(run func(ctx context.Context, to *common.Address, value *big.Int, data []byte, gasOffset uint64, sidecar *types.BlobTxSidecar, gas uint64)) *EthTxManagerMock_AddWithGas_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(*common.Address), args[2].(*big.Int), args[3].([]byte), args[4].(uint64), args[5].(*types.BlobTxSidecar), args[6].(uint64)) + }) + return _c +} + +func (_c *EthTxManagerMock_AddWithGas_Call) Return(_a0 common.Hash, _a1 error) *EthTxManagerMock_AddWithGas_Call 
{ + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthTxManagerMock_AddWithGas_Call) RunAndReturn(run func(context.Context, *common.Address, *big.Int, []byte, uint64, *types.BlobTxSidecar, uint64) (common.Hash, error)) *EthTxManagerMock_AddWithGas_Call { + _c.Call.Return(run) + return _c +} + +// Remove provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Remove(ctx context.Context, hash common.Hash) error { + ret := _m.Called(ctx, hash) if len(ret) == 0 { panic("no return value specified for Remove") @@ -102,7 +165,7 @@ func (_m *EthTxManagerMock) Remove(ctx context.Context, id common.Hash) error { var r0 error if rf, ok := ret.Get(0).(func(context.Context, common.Hash) error); ok { - r0 = rf(ctx, id) + r0 = rf(ctx, hash) } else { r0 = ret.Error(0) } @@ -117,12 +180,12 @@ type EthTxManagerMock_Remove_Call struct { // Remove is a helper method to define mock.On call // - ctx context.Context -// - id common.Hash -func (_e *EthTxManagerMock_Expecter) Remove(ctx interface{}, id interface{}) *EthTxManagerMock_Remove_Call { - return &EthTxManagerMock_Remove_Call{Call: _e.mock.On("Remove", ctx, id)} +// - hash common.Hash +func (_e *EthTxManagerMock_Expecter) Remove(ctx interface{}, hash interface{}) *EthTxManagerMock_Remove_Call { + return &EthTxManagerMock_Remove_Call{Call: _e.mock.On("Remove", ctx, hash)} } -func (_c *EthTxManagerMock_Remove_Call) Run(run func(ctx context.Context, id common.Hash)) *EthTxManagerMock_Remove_Call { +func (_c *EthTxManagerMock_Remove_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthTxManagerMock_Remove_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(common.Hash)) }) @@ -139,9 +202,9 @@ func (_c *EthTxManagerMock_Remove_Call) RunAndReturn(run func(context.Context, c return _c } -// Result provides a mock function with given fields: ctx, id -func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) 
(zkevm_ethtx_managertypes.MonitoredTxResult, error) { - ret := _m.Called(ctx, id) +// Result provides a mock function with given fields: ctx, hash +func (_m *EthTxManagerMock) Result(ctx context.Context, hash common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, hash) if len(ret) == 0 { panic("no return value specified for Result") @@ -150,16 +213,16 @@ func (_m *EthTxManagerMock) Result(ctx context.Context, id common.Hash) (zkevm_e var r0 zkevm_ethtx_managertypes.MonitoredTxResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, common.Hash) (zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { - return rf(ctx, id) + return rf(ctx, hash) } if rf, ok := ret.Get(0).(func(context.Context, common.Hash) zkevm_ethtx_managertypes.MonitoredTxResult); ok { - r0 = rf(ctx, id) + r0 = rf(ctx, hash) } else { r0 = ret.Get(0).(zkevm_ethtx_managertypes.MonitoredTxResult) } if rf, ok := ret.Get(1).(func(context.Context, common.Hash) error); ok { - r1 = rf(ctx, id) + r1 = rf(ctx, hash) } else { r1 = ret.Error(1) } @@ -174,12 +237,12 @@ type EthTxManagerMock_Result_Call struct { // Result is a helper method to define mock.On call // - ctx context.Context -// - id common.Hash -func (_e *EthTxManagerMock_Expecter) Result(ctx interface{}, id interface{}) *EthTxManagerMock_Result_Call { - return &EthTxManagerMock_Result_Call{Call: _e.mock.On("Result", ctx, id)} +// - hash common.Hash +func (_e *EthTxManagerMock_Expecter) Result(ctx interface{}, hash interface{}) *EthTxManagerMock_Result_Call { + return &EthTxManagerMock_Result_Call{Call: _e.mock.On("Result", ctx, hash)} } -func (_c *EthTxManagerMock_Result_Call) Run(run func(ctx context.Context, id common.Hash)) *EthTxManagerMock_Result_Call { +func (_c *EthTxManagerMock_Result_Call) Run(run func(ctx context.Context, hash common.Hash)) *EthTxManagerMock_Result_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(common.Hash)) }) @@ -196,9 +259,9 @@ 
func (_c *EthTxManagerMock_Result_Call) RunAndReturn(run func(context.Context, c return _c } -// ResultsByStatus provides a mock function with given fields: ctx, statuses -func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { - ret := _m.Called(ctx, statuses) +// ResultsByStatus provides a mock function with given fields: ctx, status +func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, status []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error) { + ret := _m.Called(ctx, status) if len(ret) == 0 { panic("no return value specified for ResultsByStatus") @@ -207,10 +270,10 @@ func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []zkev var r0 []zkevm_ethtx_managertypes.MonitoredTxResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) ([]zkevm_ethtx_managertypes.MonitoredTxResult, error)); ok { - return rf(ctx, statuses) + return rf(ctx, status) } if rf, ok := ret.Get(0).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) []zkevm_ethtx_managertypes.MonitoredTxResult); ok { - r0 = rf(ctx, statuses) + r0 = rf(ctx, status) } else { if ret.Get(0) != nil { r0 = ret.Get(0).([]zkevm_ethtx_managertypes.MonitoredTxResult) @@ -218,7 +281,7 @@ func (_m *EthTxManagerMock) ResultsByStatus(ctx context.Context, statuses []zkev } if rf, ok := ret.Get(1).(func(context.Context, []zkevm_ethtx_managertypes.MonitoredTxStatus) error); ok { - r1 = rf(ctx, statuses) + r1 = rf(ctx, status) } else { r1 = ret.Error(1) } @@ -233,12 +296,12 @@ type EthTxManagerMock_ResultsByStatus_Call struct { // ResultsByStatus is a helper method to define mock.On call // - ctx context.Context -// - statuses []zkevm_ethtx_managertypes.MonitoredTxStatus -func (_e *EthTxManagerMock_Expecter) ResultsByStatus(ctx interface{}, statuses 
interface{}) *EthTxManagerMock_ResultsByStatus_Call { - return &EthTxManagerMock_ResultsByStatus_Call{Call: _e.mock.On("ResultsByStatus", ctx, statuses)} +// - status []zkevm_ethtx_managertypes.MonitoredTxStatus +func (_e *EthTxManagerMock_Expecter) ResultsByStatus(ctx interface{}, status interface{}) *EthTxManagerMock_ResultsByStatus_Call { + return &EthTxManagerMock_ResultsByStatus_Call{Call: _e.mock.On("ResultsByStatus", ctx, status)} } -func (_c *EthTxManagerMock_ResultsByStatus_Call) Run(run func(ctx context.Context, statuses []zkevm_ethtx_managertypes.MonitoredTxStatus)) *EthTxManagerMock_ResultsByStatus_Call { +func (_c *EthTxManagerMock_ResultsByStatus_Call) Run(run func(ctx context.Context, status []zkevm_ethtx_managertypes.MonitoredTxStatus)) *EthTxManagerMock_ResultsByStatus_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].([]zkevm_ethtx_managertypes.MonitoredTxStatus)) }) @@ -255,6 +318,38 @@ func (_c *EthTxManagerMock_ResultsByStatus_Call) RunAndReturn(run func(context.C return _c } +// Start provides a mock function with no fields +func (_m *EthTxManagerMock) Start() { + _m.Called() +} + +// EthTxManagerMock_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start' +type EthTxManagerMock_Start_Call struct { + *mock.Call +} + +// Start is a helper method to define mock.On call +func (_e *EthTxManagerMock_Expecter) Start() *EthTxManagerMock_Start_Call { + return &EthTxManagerMock_Start_Call{Call: _e.mock.On("Start")} +} + +func (_c *EthTxManagerMock_Start_Call) Run(run func()) *EthTxManagerMock_Start_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *EthTxManagerMock_Start_Call) Return() *EthTxManagerMock_Start_Call { + _c.Call.Return() + return _c +} + +func (_c *EthTxManagerMock_Start_Call) RunAndReturn(run func()) *EthTxManagerMock_Start_Call { + _c.Run(run) + return _c +} + // NewEthTxManagerMock creates a new instance of 
EthTxManagerMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewEthTxManagerMock(t interface { diff --git a/test/helpers/reorg.go b/test/helpers/reorg.go index 601cbe712..136de16a4 100644 --- a/test/helpers/reorg.go +++ b/test/helpers/reorg.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/0xPolygon/cdk/log" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/ethclient/simulated" "github.com/stretchr/testify/require" ) diff --git a/test/helpers/simulated.go b/test/helpers/simulated.go index 2b0b0dac1..5cc7b8752 100644 --- a/test/helpers/simulated.go +++ b/test/helpers/simulated.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/0xPolygon/cdk-contracts-tooling/contracts/l2-sovereign-chain/polygonzkevmbridgev2" - "github.com/0xPolygon/cdk/log" "github.com/0xPolygon/cdk/test/contracts/transparentupgradableproxy" + "github.com/agglayer/aggkit/log" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/test/run-e2e-multi_pp.sh b/test/run-e2e-multi_pp.sh deleted file mode 100755 index f63013568..000000000 --- a/test/run-e2e-multi_pp.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -source $(dirname $0)/scripts/env.sh - -function log_error() { - echo -e "\033[0;31mError: $*" "\033[0m" -} - -function log_fatal() { - log_error $* - exit 1 -} - -function ok_or_fatal(){ - if [ $? -ne 0 ]; then - log_fatal $* - fi -} - -function build_docker_if_required(){ - docker images -q cdk:latest > /dev/null - if [ $? -ne 0 ] ; then - echo "Building cdk:latest" - pushd $BASE_FOLDER/.. 
- make build-docker - ok_or_fatal "Failed to build docker image" - popd - else - echo "docker cdk:latest already exists" - fi -} - -function resolve_template(){ - local _TEMPLATE_FILE="$1" - local _RESULT_VARNAME="$2" - local _TEMP_FILE=$(mktemp --suffix ".yml") - echo "rendering $_TEMPLATE_FILE to temp file $_TEMP_FILE" - go run ../scripts/run_template.go $_TEMPLATE_FILE > $_TEMP_FILE - ok_or_fatal "Failed to render template $_TEMPLATE_FILE" - grep "" "$_TEMP_FILE" - if [ $? -eq 0 ]; then - log_fatal "Failed to render template $_TEMPLATE_FILE. missing values" - fi - eval $_RESULT_VARNAME="$_TEMP_FILE" -} - -############################################################################### -# MAIN -############################################################################### -BASE_FOLDER=$(dirname $0) -PP1_ORIGIN_CONFIG_FILE=combinations/fork12-pessimistic-multi.yml -PP2_ORIGIN_CONFIG_FILE=combinations/fork12-pessimistic-multi-attach-second-cdk.yml -KURTOSIS_ENCLAVE=cdk - -[ -z $KURTOSIS_FOLDER ] && echo "KURTOSIS_FOLDER is not set" && exit 1 -[ ! -d $KURTOSIS_FOLDER ] && echo "KURTOSIS_FOLDER is not a directory ($KURTOSIS_FOLDER)" && exit 1 - - -[ ! -f $PP1_ORIGIN_CONFIG_FILE ] && echo "File $PP1_ORIGIN_CONFIG_FILE does not exist" && exit 1 -[ ! 
-f $PP2_ORIGIN_CONFIG_FILE ] && echo "File $PP2_ORIGIN_CONFIG_FILE does not exist" && exit 1 - -build_docker_if_required -resolve_template $PP1_ORIGIN_CONFIG_FILE PP1_RENDERED_CONFIG_FILE -resolve_template $PP2_ORIGIN_CONFIG_FILE PP2_RENDERED_CONFIG_FILE - -kurtosis clean --all -kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file "$PP1_RENDERED_CONFIG_FILE" --image-download always $KURTOSIS_FOLDER -ok_or_fatal "Failed to run kurtosis pp1" - -kurtosis run --enclave $KURTOSIS_ENCLAVE --args-file "$PP2_RENDERED_CONFIG_FILE" --image-download always $KURTOSIS_FOLDER -ok_or_fatal "Failed to run kurtosis attached second cdk" diff --git a/test/run-e2e.sh b/test/run-e2e.sh index adbbcbcb1..d0e3f12eb 100755 --- a/test/run-e2e.sh +++ b/test/run-e2e.sh @@ -9,7 +9,7 @@ fi DATA_AVAILABILITY_MODE=$2 if [ -z $DATA_AVAILABILITY_MODE ]; then - echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium', 'pessimistic']" + echo "Missing DATA_AVAILABILITY_MODE: ['rollup', 'cdk-validium']" exit 1 fi diff --git a/test/scripts/agglayer_certificates_monitor.sh b/test/scripts/agglayer_certificates_monitor.sh deleted file mode 100755 index c530548f8..000000000 --- a/test/scripts/agglayer_certificates_monitor.sh +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash -# This script monitors the agglayer certificates progress of pessimistic proof. - -function parse_params(){ - # Check if the required arguments are provided. - if [ "$#" -lt 3 ]; then - echo "Usage: $0 " - exit 1 - fi - - # The number of batches to be verified. - settle_certificates_target="$1" - - # The script timeout (in seconds). - timeout="$2" - - # The network id of the L2 network. - l2_rpc_network_id="$3" -} - -function check_timeout(){ - local _end_time=$1 - current_time=$(date +%s) - if ((current_time > _end_time)); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ❌ Exiting... Timeout reached not found the expected numbers of settled certs!" 
- exit 1 - fi -} - -function check_num_certificates(){ - readonly agglayer_rpc_url="$(kurtosis port print cdk agglayer agglayer)" - - cast_output=$(cast rpc --rpc-url "$agglayer_rpc_url" "interop_getLatestKnownCertificateHeader" "$l2_rpc_network_id" 2>&1) - - if [ $? -ne 0 ]; then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] Error executing command cast rpc: $cast_output" - return - fi - - height=$(extract_certificate_height "$cast_output") - [[ -z "$height" ]] && { - echo "Error: Failed to extract certificate height: $height." >&3 - return - } - - status=$(extract_certificate_status "$cast_output") - [[ -z "$status" ]] && { - echo "Error: Failed to extract certificate status." >&3 - return - } - - echo "[$(date '+%Y-%m-%d %H:%M:%S')] Last known agglayer certificate height: $height, status: $status" >&3 - - if (( height > settle_certificates_target - 1 )); then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." >&3 - exit 0 - fi - - if (( height == settle_certificates_target - 1 )); then - if [ "$status" == "Settled" ]; then - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ✅ Success! The number of settled certificates has reached the target." >&3 - exit 0 - fi - - echo "[$(date '+%Y-%m-%d %H:%M:%S')] ⚠️ Warning! The number of settled certificates is one less than the target." >&3 - fi -} - -function extract_certificate_height() { - local cast_output="$1" - echo "$cast_output" | jq -r '.height' -} - -function extract_certificate_status() { - local cast_output="$1" - echo "$cast_output" | jq -r '.status' -} - -# MAIN - -parse_params $* -start_time=$(date +%s) -end_time=$((start_time + timeout)) -echo "[$(date '+%Y-%m-%d %H:%M:%S')] Start monitoring agglayer certificates progress..." 
-while true; do - check_num_certificates - check_timeout $end_time - sleep 10 -done diff --git a/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go b/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go deleted file mode 100644 index 28115129d..000000000 --- a/tools/aggsender_find_imported_bridge/aggsender_find_imported_bridge.go +++ /dev/null @@ -1,121 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "math/big" - "os" - - "github.com/0xPolygon/cdk/agglayer" - "github.com/0xPolygon/cdk/aggsender/rpcclient" - "github.com/0xPolygon/cdk/aggsender/types" - "github.com/0xPolygon/cdk/bridgesync" - "github.com/0xPolygon/cdk/log" -) - -const ( - errLevelUnexpected = 1 - errLevelWrongParams = 2 - errLevelComms = 3 - errLevelNotFound = 4 - errLevelFoundButNotSettled = 5 - - base10 = 10 - minimumNumArgs = 3 -) - -func unmarshalGlobalIndex(globalIndex string) (*agglayer.GlobalIndex, error) { - var globalIndexParsed agglayer.GlobalIndex - // First try if it's already decomposed - err := json.Unmarshal([]byte(globalIndex), &globalIndexParsed) - if err != nil { - bigInt := new(big.Int) - _, ok := bigInt.SetString(globalIndex, base10) - if !ok { - return nil, fmt.Errorf("invalid global index: %v", globalIndex) - } - mainnetFlag, rollupIndex, leafIndex, err := bridgesync.DecodeGlobalIndex(bigInt) - if err != nil { - return nil, fmt.Errorf("invalid global index, fail to decode: %v", globalIndex) - } - globalIndexParsed.MainnetFlag = mainnetFlag - globalIndexParsed.RollupIndex = rollupIndex - globalIndexParsed.LeafIndex = leafIndex - } - return &globalIndexParsed, nil -} - -// This function find out the certificate for a deposit -// It use the aggsender RPC -func certContainsGlobalIndex(cert *types.CertificateInfo, globalIndex *agglayer.GlobalIndex) (bool, error) { - if cert == nil { - return false, nil - } - var certSigned agglayer.SignedCertificate - err := json.Unmarshal([]byte(cert.SignedCertificate), &certSigned) - if err != nil 
{ - log.Debugf("cert: %v", cert.SignedCertificate) - return false, fmt.Errorf("error Unmarshal cert. Err: %w", err) - } - for _, importedBridge := range certSigned.ImportedBridgeExits { - if *importedBridge.GlobalIndex == *globalIndex { - return true, nil - } - } - return false, nil -} - -func main() { - if len(os.Args) != minimumNumArgs { - fmt.Printf("Wrong number of arguments\n") - fmt.Printf(" Usage: %v \n", os.Args[0]) - os.Exit(errLevelWrongParams) - } - aggsenderRPC := os.Args[1] - globalIndex := os.Args[2] - decodedGlobalIndex, err := unmarshalGlobalIndex(globalIndex) - if err != nil { - log.Errorf("Error unmarshalGlobalIndex: %v", err) - os.Exit(errLevelWrongParams) - } - log.Debugf("decodedGlobalIndex: %v", decodedGlobalIndex) - aggsenderClient := rpcclient.NewClient(aggsenderRPC) - // Get latest certificate - cert, err := aggsenderClient.GetCertificateHeaderPerHeight(nil) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelComms) - } - - currentHeight := cert.Height - for cert != nil { - found, err := certContainsGlobalIndex(cert, decodedGlobalIndex) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelUnexpected) - } - if found { - log.Infof("Found certificate for global index: %v", globalIndex) - if cert.Status.IsSettled() { - log.Infof("Certificate is settled: %s status:%s", cert.ID(), cert.Status.String()) - os.Exit(0) - } - log.Errorf("Certificate is not settled") - os.Exit(errLevelFoundButNotSettled) - } else { - log.Debugf("Certificate not found for global index: %v", globalIndex) - } - // We have check the oldest cert - if currentHeight == 0 { - log.Errorf("Checked all certs and it's not found") - os.Exit(errLevelNotFound) - } - log.Infof("Checking previous certificate, height: %v", currentHeight) - cert, err = aggsenderClient.GetCertificateHeaderPerHeight(¤tHeight) - if err != nil { - log.Errorf("Error: %v", err) - os.Exit(errLevelComms) - } - currentHeight-- - } -} diff --git a/translator/translator_impl.go 
b/translator/translator_impl.go index cd7fbc426..68a3f8459 100644 --- a/translator/translator_impl.go +++ b/translator/translator_impl.go @@ -1,6 +1,6 @@ package translator -import "github.com/0xPolygon/cdk/log" +import "github.com/agglayer/aggkit/log" type TranslatorFullMatchRule struct { // If null match any context diff --git a/tree/appendonlytree.go b/tree/appendonlytree.go deleted file mode 100644 index 7dbead014..000000000 --- a/tree/appendonlytree.go +++ /dev/null @@ -1,131 +0,0 @@ -package tree - -import ( - "database/sql" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -var ( - ErrInvalidIndex = errors.New("invalid index") -) - -// AppendOnlyTree is a tree where leaves are added sequentially (by index) -type AppendOnlyTree struct { - *Tree - lastLeftCache [types.DefaultHeight]common.Hash - lastIndex int64 -} - -// NewAppendOnlyTree creates a AppendOnlyTree -func NewAppendOnlyTree(db *sql.DB, dbPrefix string) *AppendOnlyTree { - t := newTree(db, dbPrefix) - return &AppendOnlyTree{ - Tree: t, - // -1 is used to indicate no leafs, 0 means the first leaf is added (at index 0) and so on. - // In order to differentiate the "cache not initialised" we need any value smaller than -1 - lastIndex: -2, - } -} - -func (t *AppendOnlyTree) AddLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) error { - if int64(leaf.Index) != t.lastIndex+1 { - // rebuild cache - if err := t.initCache(tx); err != nil { - return err - } - if int64(leaf.Index) != t.lastIndex+1 { - log.Errorf( - "mismatched index. 
Expected: %d, actual: %d", - t.lastIndex+1, leaf.Index, - ) - return ErrInvalidIndex - } - } - // Calculate new tree nodes - currentChildHash := leaf.Hash - newNodes := []types.TreeNode{} - for h := uint8(0); h < types.DefaultHeight; h++ { - var parent types.TreeNode - if leaf.Index&(1< 0 { - // Add child to the right - parent = newTreeNode(t.lastLeftCache[h], currentChildHash) - } else { - // Add child to the left - parent = newTreeNode(currentChildHash, t.zeroHashes[h]) - // Update cache - t.lastLeftCache[h] = currentChildHash - } - currentChildHash = parent.Hash - newNodes = append(newNodes, parent) - } - - // store root - if err := t.storeRoot(tx, types.Root{ - Hash: currentChildHash, - Index: leaf.Index, - BlockNum: blockNum, - BlockPosition: blockPosition, - }); err != nil { - return err - } - - // store nodes - if err := t.storeNodes(tx, newNodes); err != nil { - return err - } - t.lastIndex++ - tx.AddRollbackCallback(func() { - log.Debugf("decreasing index due to rollback") - t.lastIndex-- - }) - return nil -} - -func (t *AppendOnlyTree) initCache(tx db.Txer) error { - siblings := [types.DefaultHeight]common.Hash{} - lastRoot, err := t.getLastRootWithTx(tx) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - t.lastIndex = -1 - t.lastLeftCache = siblings - return nil - } - return err - } - t.lastIndex = int64(lastRoot.Index) - currentNodeHash := lastRoot.Hash - index := t.lastIndex - // It starts in height-1 because 0 is the level of the leafs - for h := int(types.DefaultHeight - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(tx, currentNodeHash) - if err != nil { - return fmt.Errorf( - "error getting node %s from the RHT at height %d with root %s: %w", - currentNodeHash.Hex(), h, lastRoot.Hash.Hex(), err, - ) - } - if currentNode == nil { - return db.ErrNotFound - } - siblings[h] = currentNode.Left - if index&(1< 0 { - currentNodeHash = currentNode.Right - } else { - currentNodeHash = currentNode.Left - } - } - - // Reverse the siblings to go 
from leafs to root - for i, j := 0, len(siblings)-1; i == j; i, j = i+1, j-1 { - siblings[i], siblings[j] = siblings[j], siblings[i] - } - - t.lastLeftCache = siblings - return nil -} diff --git a/tree/migrations/migrations.go b/tree/migrations/migrations.go deleted file mode 100644 index dd5847e7c..000000000 --- a/tree/migrations/migrations.go +++ /dev/null @@ -1,22 +0,0 @@ -package migrations - -import ( - _ "embed" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/db/types" -) - -//go:embed tree0001.sql -var mig001 string - -var Migrations = []types.Migration{ - { - ID: "tree001", - SQL: mig001, - }, -} - -func RunMigrations(dbPath string) error { - return db.RunMigrations(dbPath, Migrations) -} diff --git a/tree/migrations/tree0001.sql b/tree/migrations/tree0001.sql deleted file mode 100644 index f70d048eb..000000000 --- a/tree/migrations/tree0001.sql +++ /dev/null @@ -1,17 +0,0 @@ --- +migrate Down -DROP TABLE IF EXISTS /*dbprefix*/root; -DROP TABLE IF EXISTS /*dbprefix*/rht; - --- +migrate Up -CREATE TABLE /*dbprefix*/root ( - hash VARCHAR PRIMARY KEY, - position INTEGER NOT NULL, - block_num BIGINT NOT NULL, - block_position BIGINT NOT NULL -); - -CREATE TABLE /*dbprefix*/rht ( - hash VARCHAR PRIMARY KEY, - left VARCHAR NOT NULL, - right VARCHAR NOT NULL -); diff --git a/tree/testvectors/claim-vectors.json b/tree/testvectors/claim-vectors.json deleted file mode 100644 index 4778e9e43..000000000 --- a/tree/testvectors/claim-vectors.json +++ /dev/null @@ -1,306 +0,0 @@ -[ - { - "leafs": [ - { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x", - "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - }, - { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - 
"destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345670", - "leafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - }, - { - "originNetwork": 0, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345678", - "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - }, - { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", - "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" - } - ], - "index": 0, - "proof": [ - "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", - "0xb48c8301099f75206bc93b1512c7b3855b60b4f8cbaedf8679a184d1d450a4f1", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - 
"0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" - }, - { - "leafs": [ - { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x", - "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - }, - { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345670", - "leafValue": 
"0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - }, - { - "originNetwork": 0, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345678", - "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - }, - { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", - "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" - } - ], - "index": 1, - "proof": [ - "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", - "0xb48c8301099f75206bc93b1512c7b3855b60b4f8cbaedf8679a184d1d450a4f1", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - 
"0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" - }, - { - "leafs": [ - { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x", - "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - }, - { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345670", - "leafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - }, - { - "originNetwork": 0, - "tokenAddress": 
"0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345678", - "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - }, - { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", - "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" - } - ], - "index": 2, - "proof": [ - "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242", - "0x653142d4a4d6f7985a3f33cad31e011dbee8909846b34c38c7b235ca08828521", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - 
"0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - "0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" - }, - { - "leafs": [ - { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x", - "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - }, - { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345670", - "leafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - }, - { - "originNetwork": 0, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 
1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345678", - "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - }, - { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", - "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" - } - ], - "index": 3, - "proof": [ - "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d", - "0x653142d4a4d6f7985a3f33cad31e011dbee8909846b34c38c7b235ca08828521", - "0xb4c11951957c6f8f642c4af61cd6b24640fec6dc7fc607ee8206a99e92410d30", - "0x21ddb9a356815c3fac1026b6dec5df3124afbadb485c9ba5a3e3398a04b7ba85", - "0xe58769b32a1beaf1ea27375a44095a0d1fb664ce2dd358e7fcbfb78c26a19344", - "0x0eb01ebfc9ed27500cd4dfc979272d1f0913cc9f66540d7e8005811109e1cf2d", - "0x887c22bd8750d34016ac3c66b5ff102dacdd73f6b014e710b51e8022af9a1968", - "0xffd70157e48063fc33c97a050f7f640233bf646cc98d9524c6b92bcf3ab56f83", - "0x9867cc5f7f196b93bae1e27e6320742445d290f2263827498b54fec539f756af", - "0xcefad4e508c098b9a7e1d8feb19955fb02ba9675585078710969d3440f5054e0", - "0xf9dc3e7fe016e050eff260334f18a5d4fe391d82092319f5964f2e2eb7c1c3a5", - "0xf8b13a49e282f609c317a833fb8d976d11517c571d1221a265d25af778ecf892", - "0x3490c6ceeb450aecdc82e28293031d10c7d73bf85e57bf041a97360aa2c5d99c", - "0xc1df82d9c4b87413eae2ef048f94b4d3554cea73d92b0f7af96e0271c691e2bb", - "0x5c67add7c6caf302256adedf7ab114da0acfe870d449a3a489f781d659e8becc", - "0xda7bce9f4e8618b6bd2f4132ce798cdc7a60e7e1460a7299e3c6342a579626d2", - "0x2733e50f526ec2fa19a22b31e8ed50f23cd1fdf94c9154ed3a7609a2f1ff981f", - "0xe1d3b5c807b281e4683cc6d6315cf95b9ade8641defcb32372f1c126e398ef7a", - "0x5a2dce0a8a7f68bb74560f8f71837c2c2ebbcbf7fffb42ae1896f13f7c7479a0", - 
"0xb46a28b6f55540f89444f63de0378e3d121be09e06cc9ded1c20e65876d36aa0", - "0xc65e9645644786b620e2dd2ad648ddfcbf4a7e5b1a3a4ecfe7f64667a3f0b7e2", - "0xf4418588ed35a2458cffeb39b93d26f18d2ab13bdce6aee58e7b99359ec2dfd9", - "0x5a9c16dc00d6ef18b7933a6f8dc65ccb55667138776f7dea101070dc8796e377", - "0x4df84f40ae0c8229d0d6069e5c8f39a7c299677a09d367fc7b05e3bc380ee652", - "0xcdc72595f74c7b1043d0e1ffbab734648c838dfb0527d971b602bc216c9619ef", - "0x0abf5ac974a1ed57f4050aa510dd9c74f508277b39d7973bb2dfccc5eeb0618d", - "0xb8cd74046ff337f0a7bf2c8e03e10f642c1886798d71806ab1e888d9e5ee87d0", - "0x838c5655cb21c6cb83313b5a631175dff4963772cce9108188b34ac87c81c41e", - "0x662ee4dd2dd7b2bc707961b1e646c4047669dcb6584f0d8d770daf5d7e7deb2e", - "0x388ab20e2573d171a88108e79d820e98f26c0b84aa8b2f4aa4968dbb818ea322", - "0x93237c50ba75ee485f4c22adf2f741400bdf8d6a9cc7df7ecae576221665d735", - "0x8448818bb4ae4562849e949e17ac16e0be16688e156b5cf15e098c627c0056a9" - ], - "root": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" - } -] \ No newline at end of file diff --git a/tree/testvectors/leaf-vectors.json b/tree/testvectors/leaf-vectors.json deleted file mode 100644 index a0be08953..000000000 --- a/tree/testvectors/leaf-vectors.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x", - "leafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - }, - { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345670", - "leafValue": "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - }, - { - "originNetwork": 0, - "tokenAddress": 
"0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "metadata": "0x12345678", - "leafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - }, - { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266", - "leafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242" - } -] \ No newline at end of file diff --git a/tree/testvectors/root-vectors.json b/tree/testvectors/root-vectors.json deleted file mode 100644 index b1c6929c0..000000000 --- a/tree/testvectors/root-vectors.json +++ /dev/null @@ -1,67 +0,0 @@ -[ - { - "previousLeafsValues": [], - "currentRoot": "0x27ae5ba08d7291c96c8cbddcc148bf48a6d68c7974b94356f53754ef6171d757", - "newLeaf": { - "originNetwork": 0, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "currentLeafValue": "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", - "metadata": "0x" - }, - "newRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa" - }, - { - "previousLeafsValues": [ - "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5" - ], - "currentRoot": "0xbf7ddbb59aa018a4c74e061f5172973ff09e4cb7f58405af117fc521f1ca46aa", - "newLeaf": { - "originNetwork": 1, - "tokenAddress": "0x6B175474E89094C44Da98b954EedeAC495271d0F", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 0, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "currentLeafValue": 
"0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", - "metadata": "0x12345670" - }, - "newRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128" - }, - { - "previousLeafsValues": [ - "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", - "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f" - ], - "currentRoot": "0xa7042a3ce14f384bbff63f1cee6ee5579193c2d7002e0034854963322cda6128", - "newLeaf": { - "originNetwork": 0, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x8ac7230489e80000", - "destinationNetwork": 1, - "destinationAddress": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266", - "currentLeafValue": "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d", - "metadata": "0x12345678" - }, - "newRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5" - }, - { - "previousLeafsValues": [ - "0xa4bfa0908dc7b06d98da4309f859023d6947561bc19bc00d77f763dea1a0b9f5", - "0x315fee1aa202bf4a6bd0fde560c89be90b6e6e2aaf92dc5e8d118209abc3410f", - "0xb598ce65aa15c08dda126a2985ba54f0559eaac562bb43ba430c7344261fbc5d" - ], - "currentRoot": "0x88e652896cb1de5962a0173a222059f51e6b943a2ba6dfc9acbff051ceb1abb5", - "newLeaf": { - "originNetwork": 10, - "tokenAddress": "0x0000000000000000000000000000000000000000", - "amount": "0x01", - "destinationNetwork": 4, - "destinationAddress": "0x0000000000000000000000000000000000000000", - "currentLeafValue": "0xe6585bdf74b6a46b9ede8b1b877e1232fb79ee93106c4db8ffd49cf1685bf242", - "metadata": "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb922661234e51aad88F6F4ce6aB8827279cffFb92266" - }, - "newRoot": "0x42d3339fe8eb57770953423f20a029e778a707e8d58aaf110b40d5eb4dd25721" - } -] \ No newline at end of file diff --git a/tree/testvectors/types.go b/tree/testvectors/types.go deleted file mode 100644 index 27bc1abbc..000000000 --- a/tree/testvectors/types.go +++ /dev/null @@ -1,59 +0,0 @@ -package testvectors - -import ( - 
"encoding/binary" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/iden3/go-iden3-crypto/keccak256" -) - -// DepositVectorRaw represents the deposit vector -type DepositVectorRaw struct { - OriginNetwork uint32 `json:"originNetwork"` - TokenAddress string `json:"tokenAddress"` - Amount string `json:"amount"` - DestinationNetwork uint32 `json:"destinationNetwork"` - DestinationAddress string `json:"destinationAddress"` - ExpectedHash string `json:"leafValue"` - CurrentHash string `json:"currentLeafValue"` - Metadata string `json:"metadata"` -} - -func (d *DepositVectorRaw) Hash() common.Hash { - origNet := make([]byte, 4) //nolint:mnd - binary.BigEndian.PutUint32(origNet, d.OriginNetwork) - destNet := make([]byte, 4) //nolint:mnd - binary.BigEndian.PutUint32(destNet, d.DestinationNetwork) - - metaHash := keccak256.Hash(common.FromHex(d.Metadata)) - var buf [32]byte - amount, _ := big.NewInt(0).SetString(d.Amount, 0) - origAddrBytes := common.HexToAddress(d.TokenAddress) - destAddrBytes := common.HexToAddress(d.DestinationAddress) - return common.BytesToHash(keccak256.Hash( - []byte{0}, // LeafType - origNet, - origAddrBytes[:], - destNet, - destAddrBytes[:], - amount.FillBytes(buf[:]), - metaHash, - )) -} - -// MTClaimVectorRaw represents the merkle proof -type MTClaimVectorRaw struct { - Deposits []DepositVectorRaw `json:"leafs"` - Index uint32 `json:"index"` - MerkleProof []string `json:"proof"` - ExpectedRoot string `json:"root"` -} - -// MTRootVectorRaw represents the root of Merkle Tree -type MTRootVectorRaw struct { - ExistingLeaves []string `json:"previousLeafsValues"` - CurrentRoot string `json:"currentRoot"` - NewLeaf DepositVectorRaw `json:"newLeaf"` - NewRoot string `json:"newRoot"` -} diff --git a/tree/tree.go b/tree/tree.go deleted file mode 100644 index 51f88a6ee..000000000 --- a/tree/tree.go +++ /dev/null @@ -1,273 +0,0 @@ -package tree - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "github.com/0xPolygon/cdk/db" 
- "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/russross/meddler" - "golang.org/x/crypto/sha3" -) - -var ( - EmptyProof = types.Proof{} -) - -type Tree struct { - db *sql.DB - zeroHashes []common.Hash - rhtTable string - rootTable string -} - -func newTreeNode(left, right common.Hash) types.TreeNode { - var hash common.Hash - hasher := sha3.NewLegacyKeccak256() - hasher.Write(left[:]) - hasher.Write(right[:]) - copy(hash[:], hasher.Sum(nil)) - return types.TreeNode{ - Hash: hash, - Left: left, - Right: right, - } -} - -func newTree(db *sql.DB, tablePrefix string) *Tree { - t := &Tree{ - db: db, - zeroHashes: generateZeroHashes(types.DefaultHeight), - rhtTable: tablePrefix + "rht", - rootTable: tablePrefix + "root", - } - - return t -} - -func (t *Tree) getSiblings(tx db.Querier, index uint32, root common.Hash) ( - siblings types.Proof, - hasUsedZeroHashes bool, - err error, -) { - currentNodeHash := root - // It starts in height-1 because 0 is the level of the leafs - for h := int(types.DefaultHeight - 1); h >= 0; h-- { - var currentNode *types.TreeNode - currentNode, err = t.getRHTNode(tx, currentNodeHash) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - hasUsedZeroHashes = true - siblings[h] = t.zeroHashes[h] - err = nil - continue - } else { - err = fmt.Errorf( - "height: %d, currentNode: %s, error: %w", - h, currentNodeHash.Hex(), err, - ) - return - } - } - /* - * Root (level h=3 => height=4) - * / \ - * O5 O6 (level h=2) - * / \ / \ - * O1 O2 O3 O4 (level h=1) - * /\ /\ /\ /\ - * 0 1 2 3 4 5 6 7 Leafs (level h=0) - * Example 1: - * Choose index = 3 => 011 binary - * Assuming we are in level 1 => h=1; 1< 011&010=010 which is higher than 0 so we need the left sibling (O1) - * Example 2: - * Choose index = 4 => 100 binary - * Assuming we are in level 1 => h=1; 1< 100&010=000 which is not higher than 0 so we need the right sibling 
(O4) - * Example 3: - * Choose index = 4 => 100 binary - * Assuming we are in level 2 => h=2; 1< 100&100=100 which is higher than 0 so we need the left sibling (O5) - */ - if index&(1< 0 { - siblings[h] = currentNode.Left - currentNodeHash = currentNode.Right - } else { - siblings[h] = currentNode.Right - currentNodeHash = currentNode.Left - } - } - - return -} - -// GetProof returns the merkle proof for a given index and root. -func (t *Tree) GetProof(ctx context.Context, index uint32, root common.Hash) (types.Proof, error) { - siblings, isErrNotFound, err := t.getSiblings(t.db, index, root) - if err != nil { - return types.Proof{}, err - } - if isErrNotFound { - // TODO: Validate it. It returns a proof of a tree with missing leafs - log.Warnf("getSiblings returned proof with zero hashes for index %d and root %s", index, root.String()) - } - return siblings, nil -} - -func (t *Tree) getRHTNode(tx db.Querier, nodeHash common.Hash) (*types.TreeNode, error) { - node := &types.TreeNode{} - err := meddler.QueryRow( - tx, node, - fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1`, t.rhtTable), - nodeHash.String(), - ) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return node, db.ErrNotFound - } - return node, err - } - return node, err -} - -func generateZeroHashes(height uint8) []common.Hash { - var zeroHashes = []common.Hash{ - {}, - } - // This generates a leaf = HashZero in position 0. In the rest of the positions that are - // equivalent to the ascending levels, we set the hashes of the nodes. - // So all nodes from level i=5 will have the same value and same children nodes. 
- for i := 1; i <= int(height); i++ { - hasher := sha3.NewLegacyKeccak256() - hasher.Write(zeroHashes[i-1][:]) - hasher.Write(zeroHashes[i-1][:]) - thisHeightHash := common.Hash{} - copy(thisHeightHash[:], hasher.Sum(nil)) - zeroHashes = append(zeroHashes, thisHeightHash) - } - return zeroHashes -} - -func (t *Tree) storeNodes(tx db.Txer, nodes []types.TreeNode) error { - for i := 0; i < len(nodes); i++ { - if err := meddler.Insert(tx, t.rhtTable, &nodes[i]); err != nil { - if sqliteErr, ok := db.SQLiteErr(err); ok { - if sqliteErr.ExtendedCode == db.UniqueConstrain { - // ignore repeated entries. This is likely to happen due to not - // cleaning RHT when reorg - continue - } - } - return err - } - } - return nil -} - -func (t *Tree) storeRoot(tx db.Txer, root types.Root) error { - return meddler.Insert(tx, t.rootTable, &root) -} - -// GetLastRoot returns the last processed root -func (t *Tree) GetLastRoot(tx db.Querier) (types.Root, error) { - if tx == nil { - tx = t.db - } - return t.getLastRootWithTx(tx) -} - -func (t *Tree) getLastRootWithTx(tx db.Querier) (types.Root, error) { - var root types.Root - err := meddler.QueryRow( - tx, &root, - fmt.Sprintf(`SELECT * FROM %s ORDER BY block_num DESC, block_position DESC LIMIT 1;`, t.rootTable), - ) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return root, db.ErrNotFound - } - return root, err - } - return root, nil -} - -// GetRootByIndex returns the root associated to the index -func (t *Tree) GetRootByIndex(ctx context.Context, index uint32) (types.Root, error) { - var root types.Root - if err := meddler.QueryRow( - t.db, &root, - fmt.Sprintf(`SELECT * FROM %s WHERE position = $1;`, t.rootTable), - index, - ); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return root, db.ErrNotFound - } - return root, err - } - return root, nil -} - -// GetRootByHash returns the root associated to the hash -func (t *Tree) GetRootByHash(ctx context.Context, hash common.Hash) (*types.Root, error) { - var root 
types.Root - if err := meddler.QueryRow( - t.db, &root, - fmt.Sprintf(`SELECT * FROM %s WHERE hash = $1;`, t.rootTable), - hash.Hex(), - ); err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, db.ErrNotFound - } - return nil, err - } - - return &root, nil -} - -func (t *Tree) GetLeaf(tx db.Querier, index uint32, root common.Hash) (common.Hash, error) { - currentNodeHash := root - for h := int(types.DefaultHeight - 1); h >= 0; h-- { - currentNode, err := t.getRHTNode(tx, currentNodeHash) - if err != nil { - return common.Hash{}, err - } - if index&(1< 0 { - currentNodeHash = currentNode.Right - } else { - currentNodeHash = currentNode.Left - } - } - - return currentNodeHash, nil -} - -// Reorg deletes all the data relevant from firstReorgedBlock (includded) and onwards -func (t *Tree) Reorg(tx db.Txer, firstReorgedBlock uint64) error { - _, err := tx.Exec( - fmt.Sprintf(`DELETE FROM %s WHERE block_num >= $1`, t.rootTable), - firstReorgedBlock, - ) - return err -} - -// CalculateRoot calculates the Merkle Root based on the leaf and proof of inclusion -func CalculateRoot(leafHash common.Hash, proof [types.DefaultHeight]common.Hash, index uint32) common.Hash { - node := leafHash - - // Compute the Merkle root - for height := uint8(0); height < types.DefaultHeight; height++ { - if (index>>height)&1 == 1 { - node = crypto.Keccak256Hash(proof[height].Bytes(), node.Bytes()) - } else { - node = crypto.Keccak256Hash(node.Bytes(), proof[height].Bytes()) - } - } - - return node -} diff --git a/tree/tree_test.go b/tree/tree_test.go deleted file mode 100644 index a08211a6d..000000000 --- a/tree/tree_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package tree_test - -import ( - "context" - "database/sql" - "encoding/json" - "fmt" - "os" - "path" - "testing" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/log" - "github.com/0xPolygon/cdk/tree" - "github.com/0xPolygon/cdk/tree/migrations" - "github.com/0xPolygon/cdk/tree/testvectors" - 
"github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestCheckExpectedRoot(t *testing.T) { - createTreeDB := func() *sql.DB { - dbPath := path.Join(t.TempDir(), "treeTestCheckExpectedRoot.sqlite") - log.Debug("DB created at: ", dbPath) - require.NoError(t, migrations.RunMigrations(dbPath)) - treeDB, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - - return treeDB - } - - addLeaves := func(merkletree *tree.AppendOnlyTree, - treeDB *sql.DB, - numOfLeavesToAdd, from int) { - tx, err := db.NewTx(context.Background(), treeDB) - require.NoError(t, err) - - for i := from; i < from+numOfLeavesToAdd; i++ { - require.NoError(t, merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{ - Index: uint32(i), - Hash: common.HexToHash(fmt.Sprintf("%x", i)), - })) - } - - require.NoError(t, tx.Commit()) - } - - t.Run("Check when no reorg", func(t *testing.T) { - numOfLeavesToAdd := 10 - indexToCheck := uint32(numOfLeavesToAdd - 1) - - treeDB := createTreeDB() - merkleTree := tree.NewAppendOnlyTree(treeDB, "") - - addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) - - expectedRoot, err := merkleTree.GetLastRoot(nil) - require.NoError(t, err) - - addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) - - root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) - require.NoError(t, err) - require.Equal(t, expectedRoot.Hash, root2.Hash) - require.Equal(t, expectedRoot.Index, root2.Index) - }) - - t.Run("Check after rebuild tree when reorg", func(t *testing.T) { - numOfLeavesToAdd := 10 - indexToCheck := uint32(numOfLeavesToAdd - 1) - treeDB := createTreeDB() - merkleTree := tree.NewAppendOnlyTree(treeDB, "") - - addLeaves(merkleTree, treeDB, numOfLeavesToAdd, 0) - - expectedRoot, err := merkleTree.GetLastRoot(nil) - require.NoError(t, err) - - addLeaves(merkleTree, treeDB, numOfLeavesToAdd, numOfLeavesToAdd) - - // reorg tree - tx, err := db.NewTx(context.Background(), treeDB) - 
require.NoError(t, err) - require.NoError(t, merkleTree.Reorg(tx, uint64(indexToCheck+1))) - require.NoError(t, tx.Commit()) - - // rebuild cache on adding new leaf - tx, err = db.NewTx(context.Background(), treeDB) - require.NoError(t, err) - require.NoError(t, merkleTree.AddLeaf(tx, uint64(indexToCheck+1), 0, types.Leaf{ - Index: indexToCheck + 1, - Hash: common.HexToHash(fmt.Sprintf("%x", indexToCheck+1)), - })) - require.NoError(t, tx.Commit()) - - root2, err := merkleTree.GetRootByIndex(context.Background(), indexToCheck) - require.NoError(t, err) - require.Equal(t, expectedRoot.Hash, root2.Hash) - require.Equal(t, expectedRoot.Index, root2.Index) - }) -} - -func TestMTAddLeaf(t *testing.T) { - data, err := os.ReadFile("testvectors/root-vectors.json") - require.NoError(t, err) - - var mtTestVectors []testvectors.MTRootVectorRaw - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - ctx := context.Background() - - for ti, testVector := range mtTestVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - dbPath := path.Join(t.TempDir(), "treeTestMTAddLeaf.sqlite") - log.Debug("DB created at: ", dbPath) - err := migrations.RunMigrations(dbPath) - require.NoError(t, err) - treeDB, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - _, err = treeDB.Exec(`select * from root`) - require.NoError(t, err) - merkletree := tree.NewAppendOnlyTree(treeDB, "") - - // Add exisiting leaves - tx, err := db.NewTx(ctx, treeDB) - require.NoError(t, err) - for i, leaf := range testVector.ExistingLeaves { - err = merkletree.AddLeaf(tx, uint64(i), 0, types.Leaf{ - Index: uint32(i), - Hash: common.HexToHash(leaf), - }) - require.NoError(t, err) - } - require.NoError(t, tx.Commit()) - if len(testVector.ExistingLeaves) > 0 { - root, err := merkletree.GetLastRoot(nil) - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.CurrentRoot), root.Hash) - } - - // Add new bridge - tx, err = db.NewTx(ctx, treeDB) - require.NoError(t, 
err) - err = merkletree.AddLeaf(tx, uint64(len(testVector.ExistingLeaves)), 0, types.Leaf{ - Index: uint32(len(testVector.ExistingLeaves)), - Hash: common.HexToHash(testVector.NewLeaf.CurrentHash), - }) - require.NoError(t, err) - require.NoError(t, tx.Commit()) - - root, err := merkletree.GetLastRoot(nil) - require.NoError(t, err) - require.Equal(t, common.HexToHash(testVector.NewRoot), root.Hash) - }) - } -} - -func TestMTGetProof(t *testing.T) { - data, err := os.ReadFile("testvectors/claim-vectors.json") - require.NoError(t, err) - - var mtTestVectors []testvectors.MTClaimVectorRaw - err = json.Unmarshal(data, &mtTestVectors) - require.NoError(t, err) - ctx := context.Background() - - for ti, testVector := range mtTestVectors { - t.Run(fmt.Sprintf("Test vector %d", ti), func(t *testing.T) { - dbPath := path.Join(t.TempDir(), "treeTestMTGetProof.sqlite") - err := migrations.RunMigrations(dbPath) - require.NoError(t, err) - treeDB, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - tre := tree.NewAppendOnlyTree(treeDB, "") - - tx, err := db.NewTx(ctx, treeDB) - require.NoError(t, err) - for li, leaf := range testVector.Deposits { - err = tre.AddLeaf(tx, uint64(li), 0, types.Leaf{ - Index: uint32(li), - Hash: leaf.Hash(), - }) - require.NoError(t, err) - } - require.NoError(t, tx.Commit()) - - root, err := tre.GetLastRoot(nil) - require.NoError(t, err) - expectedRoot := common.HexToHash(testVector.ExpectedRoot) - require.Equal(t, expectedRoot, root.Hash) - - proof, err := tre.GetProof(ctx, testVector.Index, expectedRoot) - require.NoError(t, err) - for i, sibling := range testVector.MerkleProof { - require.Equal(t, common.HexToHash(sibling), proof[i]) - } - }) - } -} - -func createTreeDBForTest(t *testing.T) *sql.DB { - t.Helper() - dbPath := path.Join(t.TempDir(), "tree_createTreeDBForTest.sqlite") - err := migrations.RunMigrations(dbPath) - require.NoError(t, err) - treeDB, err := db.NewSQLiteDB(dbPath) - require.NoError(t, err) - return treeDB -} diff 
--git a/tree/types/types.go b/tree/types/types.go deleted file mode 100644 index bb1173422..000000000 --- a/tree/types/types.go +++ /dev/null @@ -1,27 +0,0 @@ -package types - -import "github.com/ethereum/go-ethereum/common" - -const ( - DefaultHeight uint8 = 32 -) - -type Leaf struct { - Index uint32 - Hash common.Hash -} - -type Root struct { - Hash common.Hash `meddler:"hash,hash"` - Index uint32 `meddler:"position"` - BlockNum uint64 `meddler:"block_num"` - BlockPosition uint64 `meddler:"block_position"` -} - -type TreeNode struct { - Hash common.Hash `meddler:"hash,hash"` - Left common.Hash `meddler:"left,hash"` - Right common.Hash `meddler:"right,hash"` -} - -type Proof [DefaultHeight]common.Hash diff --git a/tree/updatabletree.go b/tree/updatabletree.go deleted file mode 100644 index be861b554..000000000 --- a/tree/updatabletree.go +++ /dev/null @@ -1,68 +0,0 @@ -package tree - -import ( - "database/sql" - "errors" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" -) - -// UpdatableTree is a tree that have updatable leaves, and doesn't need to have sequential inserts -type UpdatableTree struct { - *Tree -} - -// NewUpdatableTree returns an UpdatableTree -func NewUpdatableTree(db *sql.DB, dbPrefix string) *UpdatableTree { - t := newTree(db, dbPrefix) - ut := &UpdatableTree{ - Tree: t, - } - return ut -} - -func (t *UpdatableTree) UpsertLeaf(tx db.Txer, blockNum, blockPosition uint64, leaf types.Leaf) (common.Hash, error) { - var rootHash common.Hash - root, err := t.getLastRootWithTx(tx) - if err != nil { - if errors.Is(err, db.ErrNotFound) { - rootHash = t.zeroHashes[types.DefaultHeight] - } else { - return common.Hash{}, err - } - } else { - rootHash = root.Hash - } - siblings, _, err := t.getSiblings(tx, leaf.Index, rootHash) - if err != nil { - return common.Hash{}, err - } - currentChildHash := leaf.Hash - newNodes := []types.TreeNode{} - for h := uint8(0); h < types.DefaultHeight; h++ { 
- var parent types.TreeNode - if leaf.Index&(1< 0 { - // Add child to the right - parent = newTreeNode(siblings[h], currentChildHash) - } else { - // Add child to the left - parent = newTreeNode(currentChildHash, siblings[h]) - } - currentChildHash = parent.Hash - newNodes = append(newNodes, parent) - } - if err := t.storeRoot(tx, types.Root{ - Hash: currentChildHash, - Index: leaf.Index, - BlockNum: blockNum, - BlockPosition: blockPosition, - }); err != nil { - return common.Hash{}, err - } - if err := t.storeNodes(tx, newNodes); err != nil { - return common.Hash{}, err - } - return currentChildHash, nil -} diff --git a/tree/updatabletree_test.go b/tree/updatabletree_test.go deleted file mode 100644 index a684fd0e1..000000000 --- a/tree/updatabletree_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package tree_test - -import ( - "context" - "testing" - - "github.com/0xPolygon/cdk/db" - "github.com/0xPolygon/cdk/tree" - "github.com/0xPolygon/cdk/tree/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestUpdatableTreeExploratory(t *testing.T) { - treeDB := createTreeDBForTest(t) - sut := tree.NewUpdatableTree(treeDB, "") - blockNum := uint64(1) - blockPosition := uint64(1) - leaf1 := types.Leaf{ - Index: 10, - Hash: common.HexToHash("0x123456"), - } - leaf2 := types.Leaf{ - Index: 1, - Hash: common.HexToHash("0x123478"), - } - ctx := context.TODO() - - tx, err := db.NewTx(ctx, treeDB) - require.NoError(t, err) - _, err = sut.UpsertLeaf(tx, blockNum, blockPosition, leaf1) - require.NoError(t, err) - - root2, err := sut.UpsertLeaf(tx, blockNum, blockPosition, leaf2) - require.NoError(t, err) - leaf1get, err := sut.GetLeaf(tx, leaf1.Index, root2) - require.NoError(t, err) - require.Equal(t, leaf1.Hash, leaf1get) - // If a leaf dont exist return 'not found' error - _, err = sut.GetLeaf(tx, 99, root2) - require.ErrorIs(t, err, db.ErrNotFound) - leaf99 := types.Leaf{ - Index: 99, - Hash: common.Hash{}, // 0x00000 - } - - _, err = 
sut.UpsertLeaf(tx, blockNum, blockPosition, leaf99) - require.Error(t, err, "insert 0x000 doesnt change root and return UNIQUE constraint failed: root.hash") -}