From f8574c9b80c81193aa16b411cac676fe417f8881 Mon Sep 17 00:00:00 2001 From: John Letey Date: Thu, 9 Feb 2023 13:20:01 +0100 Subject: [PATCH] feat: init Co-authored-by: Maximilian Breithecker Co-authored-by: Troy Kessler --- .github/CHANGELOG.md | 73 + .github/README.md | 33 + .github/emergency_1295379.md | 53 + .github/mergify.yml | 16 + .github/workflows/lint.yml | 29 + .gitignore | 10 + CHANGELOG.md | 30 + CodeStructure.md | 18 + LICENSE | 21 + Makefile | 133 + app/ante.go | 80 + app/app.go | 909 ++ app/encoding.go | 50 + app/export.go | 194 + app/forks.go | 13 + app/genesis.go | 21 + app/keepers.go | 130 + app/modules.go | 152 + app/test_helpers.go | 163 + app/upgrades/v0_8_0/constants.go | 4 + app/upgrades/v0_8_0/store.go | 26 + app/upgrades/v0_8_0/upgrade.go | 75 + buf.work.yaml | 3 + cmd/kyved/app_creator.go | 117 + cmd/kyved/config.go | 32 + cmd/kyved/gen_accounts.go | 199 + cmd/kyved/main.go | 16 + cmd/kyved/root.go | 197 + config.yml | 170 + docs/config.json | 96 + docs/docs.go | 8 + docs/handler.go | 27 + docs/index.tpl | 24 + docs/swagger.yml | 8463 +++++++++++++++++ go.mod | 307 + go.sum | 1997 ++++ proto/Dockerfile | 26 + proto/buf.gen.yaml | 12 + proto/buf.lock | 19 + proto/buf.yaml | 18 + proto/generate.sh | 9 + proto/kyve/bundles/v1beta1/bundles.proto | 94 + proto/kyve/bundles/v1beta1/events.proto | 136 + proto/kyve/bundles/v1beta1/genesis.proto | 19 + proto/kyve/bundles/v1beta1/params.proto | 17 + proto/kyve/bundles/v1beta1/query.proto | 26 + proto/kyve/bundles/v1beta1/tx.proto | 124 + .../kyve/delegation/v1beta1/delegation.proto | 123 + proto/kyve/delegation/v1beta1/events.proto | 68 + proto/kyve/delegation/v1beta1/genesis.proto | 29 + proto/kyve/delegation/v1beta1/params.proto | 21 + proto/kyve/delegation/v1beta1/query.proto | 32 + proto/kyve/delegation/v1beta1/tx.proto | 88 + proto/kyve/global/v1beta1/genesis.proto | 14 + proto/kyve/global/v1beta1/global.proto | 62 + proto/kyve/global/v1beta1/query.proto | 26 + proto/kyve/global/v1beta1/tx.proto | 27 + proto/kyve/pool/v1beta1/events.proto | 86 + proto/kyve/pool/v1beta1/genesis.proto | 19 + proto/kyve/pool/v1beta1/pool.proto | 116 + proto/kyve/pool/v1beta1/tx.proto | 160 + proto/kyve/query/v1beta1/account.proto | 146 + proto/kyve/query/v1beta1/bundles.proto | 194 + proto/kyve/query/v1beta1/delegation.proto | 116 + proto/kyve/query/v1beta1/params.proto | 41 + proto/kyve/query/v1beta1/pools.proto | 84 + proto/kyve/query/v1beta1/query.proto | 155 + proto/kyve/query/v1beta1/stakers.proto | 129 + proto/kyve/stakers/v1beta1/events.proto | 59 + proto/kyve/stakers/v1beta1/genesis.proto | 27 + proto/kyve/stakers/v1beta1/params.proto | 13 + proto/kyve/stakers/v1beta1/query.proto | 26 + proto/kyve/stakers/v1beta1/stakers.proto | 86 + proto/kyve/stakers/v1beta1/tx.proto | 100 + proto/kyve/team/v1beta1/events.proto | 59 + proto/kyve/team/v1beta1/genesis.proto | 18 + proto/kyve/team/v1beta1/query.proto | 191 + proto/kyve/team/v1beta1/team.proto | 34 + proto/kyve/team/v1beta1/tx.proto | 90 + testutil/integration/checks.go | 485 + testutil/integration/helpers.go | 43 + testutil/integration/integration.go | 253 + testutil/integration/transactions.go | 87 + testutil/keeper/bundles.go | 54 + testutil/keeper/pool.go | 57 + testutil/keeper/query.go | 59 + testutil/keeper/stakers.go | 54 + testutil/nullify/nullify.go | 57 + testutil/sample/sample.go | 13 + tools/tools.go | 12 + util/arrays.go | 40 + util/logic.go | 37 + util/logic_bank.go | 96 + util/logic_error.go | 39 + util/logic_math.go | 22 + util/validate.go | 36 + 
x/bundles/client/cli/query.go | 31 + x/bundles/client/cli/query_params.go | 34 + x/bundles/client/cli/tx.go | 27 + .../client/cli/tx_claim_uploader_role.go | 45 + x/bundles/client/cli/tx_skip_uploader_role.go | 51 + .../client/cli/tx_submit_bundle_proposal.go | 78 + x/bundles/client/cli/tx_vote_proposal.go | 54 + x/bundles/genesis.go | 34 + x/bundles/keeper/getters_bundles.go | 158 + x/bundles/keeper/getters_params.go | 46 + x/bundles/keeper/grpc_query.go | 7 + x/bundles/keeper/grpc_query_params.go | 19 + x/bundles/keeper/keeper.go | 84 + .../keeper_suite_dropped_bundles_test.go | 184 + .../keeper_suite_funding_bundles_test.go | 674 ++ .../keeper_suite_invalid_bundles_test.go | 560 ++ x/bundles/keeper/keeper_suite_points_test.go | 471 + .../keeper/keeper_suite_stakers_leave_test.go | 592 ++ x/bundles/keeper/keeper_suite_test.go | 16 + .../keeper/keeper_suite_valid_bundles_test.go | 1142 +++ .../keeper_suite_zero_delegation_test.go | 786 ++ x/bundles/keeper/logic_bundles.go | 544 ++ x/bundles/keeper/logic_bundles_test.go | 982 ++ .../logic_end_block_handle_upload_timeout.go | 84 + ...ic_end_block_handle_upload_timeout_test.go | 994 ++ x/bundles/keeper/msg_server.go | 17 + .../keeper/msg_server_claim_uploader_role.go | 60 + .../msg_server_claim_uploader_role_test.go | 273 + .../keeper/msg_server_skip_uploader_role.go | 56 + .../msg_server_skip_uploader_role_test.go | 269 + .../msg_server_submit_bundle_proposal.go | 158 + .../msg_server_submit_bundle_proposal_test.go | 390 + x/bundles/keeper/msg_server_update_params.go | 30 + .../keeper/msg_server_update_params_test.go | 493 + .../keeper/msg_server_vote_bundle_proposal.go | 59 + .../msg_server_vote_bundle_proposal_test.go | 294 + x/bundles/module.go | 162 + x/bundles/spec/01_concepts.md | 56 + x/bundles/spec/02_state.md | 67 + x/bundles/spec/03_messages.md | 41 + x/bundles/spec/04_end_block.md | 15 + x/bundles/spec/05_params.md | 14 + x/bundles/spec/06_events.md | 217 + x/bundles/spec/07_exported.md | 8 + x/bundles/types/bundles.pb.go | 1781 ++++ x/bundles/types/codec.go | 22 + x/bundles/types/errors.go | 26 + x/bundles/types/events.pb.go | 2859 ++++++ x/bundles/types/expected_keepers.go | 61 + x/bundles/types/genesis.go | 52 + x/bundles/types/genesis.pb.go | 456 + x/bundles/types/keys.go | 45 + .../types/message_claim_uploader_role.go | 47 + x/bundles/types/message_skip_uploader_role.go | 48 + .../types/message_submit_bundle_proposal.go | 55 + .../types/message_vote_bundle_proposal.go | 49 + x/bundles/types/msgs.go | 35 + x/bundles/types/params.go | 63 + x/bundles/types/params.pb.go | 430 + x/bundles/types/query.pb.go | 536 ++ x/bundles/types/query.pb.gw.go | 153 + x/bundles/types/tx.pb.go | 2816 ++++++ x/bundles/types/types.go | 25 + x/delegation/client/cli/query.go | 32 + x/delegation/client/cli/query_params.go | 34 + x/delegation/client/cli/tx.go | 28 + x/delegation/client/cli/tx_delegate.go | 45 + x/delegation/client/cli/tx_redelegate.go | 46 + x/delegation/client/cli/tx_undelegate.go | 45 + .../client/cli/tx_withdraw_rewards.go | 38 + x/delegation/genesis.go | 64 + x/delegation/keeper/exported_functions.go | 98 + .../keeper/getters_delegation_data.go | 71 + .../keeper/getters_delegation_entries.go | 73 + .../keeper/getters_delegation_slash.go | 90 + x/delegation/keeper/getters_delegator.go | 104 + x/delegation/keeper/getters_index_stakers.go | 158 + x/delegation/keeper/getters_params.go | 69 + x/delegation/keeper/getters_redelegation.go | 56 + x/delegation/keeper/getters_undelegation.go | 111 + x/delegation/keeper/grpc_query.go | 21 + 
x/delegation/keeper/keeper.go | 82 + x/delegation/keeper/keeper_suite_test.go | 61 + x/delegation/keeper/logic_delegation.go | 73 + x/delegation/keeper/logic_f1distribution.go | 340 + x/delegation/keeper/logic_redelegation.go | 46 + x/delegation/keeper/logic_unbonding.go | 87 + x/delegation/keeper/msg_server.go | 17 + x/delegation/keeper/msg_server_delegate.go | 40 + .../keeper/msg_server_delegate_test.go | 295 + x/delegation/keeper/msg_server_redelegate.go | 60 + .../keeper/msg_server_redelegate_test.go | 336 + x/delegation/keeper/msg_server_undelegate.go | 26 + .../keeper/msg_server_undelegate_test.go | 581 ++ .../keeper/msg_server_update_params.go | 34 + .../keeper/msg_server_update_params_test.go | 676 ++ .../keeper/msg_server_withdraw_rewards.go | 39 + .../msg_server_withdraw_rewards_test.go | 166 + x/delegation/module.go | 161 + x/delegation/spec/01_concepts.md | 45 + x/delegation/spec/02_state.md | 145 + x/delegation/spec/03_messages.md | 43 + x/delegation/spec/04_end_block.md | 14 + x/delegation/spec/05_events.md | 42 + x/delegation/spec/06_params.md | 13 + x/delegation/spec/07_exported.md | 41 + x/delegation/types/codec.go | 22 + x/delegation/types/delegation.pb.go | 2121 +++++ x/delegation/types/errors.go | 14 + x/delegation/types/events.pb.go | 1536 +++ x/delegation/types/expected_keepers.go | 40 + x/delegation/types/genesis.go | 135 + x/delegation/types/genesis.pb.go | 771 ++ x/delegation/types/keys.go | 96 + x/delegation/types/message_delegate.go | 45 + x/delegation/types/message_redelegate.go | 40 + x/delegation/types/message_undelegate.go | 46 + .../types/message_withdraw_rewards.go | 46 + x/delegation/types/msgs.go | 35 + x/delegation/types/params.go | 83 + x/delegation/types/params.pb.go | 540 ++ x/delegation/types/query.pb.go | 538 ++ x/delegation/types/query.pb.gw.go | 153 + x/delegation/types/tx.pb.go | 2288 +++++ x/delegation/types/types.go | 1 + x/global/abci.go | 49 + x/global/abci_test.go | 154 + x/global/ante.go | 149 + x/global/ante_test.go | 609 ++ x/global/ante_utils_test.go | 78 + x/global/client/cli/query.go | 25 + x/global/client/cli/query_params.go | 34 + x/global/genesis.go | 18 + x/global/keeper/getters_params.go | 51 + x/global/keeper/grpc_query.go | 7 + x/global/keeper/grpc_query_params.go | 19 + x/global/keeper/keeper.go | 36 + x/global/keeper/keeper_test.go | 16 + x/global/keeper/msg_server.go | 17 + x/global/keeper/msg_server_update_params.go | 29 + .../keeper/msg_server_update_params_test.go | 629 ++ x/global/module.go | 169 + x/global/module_test.go | 16 + x/global/post.go | 82 + x/global/post_test.go | 226 + x/global/types/codec.go | 18 + x/global/types/genesis.go | 20 + x/global/types/genesis.pb.go | 323 + x/global/types/global.pb.go | 990 ++ x/global/types/keys.go | 17 + x/global/types/msgs.go | 34 + x/global/types/params.go | 172 + x/global/types/query.pb.go | 536 ++ x/global/types/query.pb.gw.go | 153 + x/global/types/tx.pb.go | 588 ++ x/global/types/types.go | 3 + x/global/utils.go | 103 + x/pool/client/cli/tx.go | 25 + x/pool/client/cli/tx_defund_pool.go | 47 + x/pool/client/cli/tx_fund_pool.go | 47 + x/pool/genesis.go | 26 + x/pool/keeper/getters_pool.go | 137 + x/pool/keeper/keeper.go | 70 + x/pool/keeper/keeper_test.go | 16 + x/pool/keeper/keeper_utils_test.go | 25 + .../logic_end_block_handle_pool_upgrades.go | 31 + ...gic_end_block_handle_pool_upgrades_test.go | 245 + x/pool/keeper/logic_funders.go | 78 + x/pool/keeper/logic_funders_test.go | 285 + x/pool/keeper/logic_pool.go | 45 + x/pool/keeper/msg_server.go | 17 + 
.../msg_server_cancel_runtime_upgrade.go | 35 + .../msg_server_cancel_runtime_upgrade_test.go | 184 + x/pool/keeper/msg_server_create_pool.go | 67 + x/pool/keeper/msg_server_create_pool_test.go | 307 + x/pool/keeper/msg_server_defund_pool.go | 54 + x/pool/keeper/msg_server_defund_pool_test.go | 149 + x/pool/keeper/msg_server_disable_pool.go | 42 + x/pool/keeper/msg_server_disable_pool_test.go | 492 + x/pool/keeper/msg_server_enable_pool.go | 35 + x/pool/keeper/msg_server_enable_pool_test.go | 203 + x/pool/keeper/msg_server_fund_pool.go | 70 + x/pool/keeper/msg_server_fund_pool_test.go | 278 + .../msg_server_schedule_runtime_upgrade.go | 56 + ...sg_server_schedule_runtime_upgrade_test.go | 291 + x/pool/keeper/msg_server_update_pool.go | 79 + x/pool/keeper/msg_server_update_pool_test.go | 313 + x/pool/module.go | 157 + x/pool/spec/01_concepts.md | 28 + x/pool/spec/02_state.md | 127 + x/pool/spec/03_messages.md | 65 + x/pool/spec/04_end_block.md | 10 + x/pool/spec/05_params.md | 7 + x/pool/spec/06_events.md | 141 + x/pool/spec/07_exported.md | 37 + x/pool/types/codec.go | 26 + x/pool/types/errors.go | 15 + x/pool/types/events.pb.go | 1803 ++++ x/pool/types/expected_keepers.go | 17 + x/pool/types/genesis.go | 30 + x/pool/types/genesis.pb.go | 370 + x/pool/types/keys.go | 32 + x/pool/types/message_defund_pool.go | 47 + x/pool/types/message_fund_pool.go | 47 + x/pool/types/msg.go | 105 + x/pool/types/pool.go | 73 + x/pool/types/pool.pb.go | 2087 ++++ x/pool/types/tx.pb.go | 4053 ++++++++ x/pool/types/types.go | 1 + x/query/client/cli/query.go | 56 + x/query/client/cli/query_account_assets.go | 41 + .../query_account_delegation_unbondings.go | 48 + x/query/client/cli/query_account_funded.go | 41 + .../client/cli/query_account_redelegation.go | 41 + x/query/client/cli/query_can_propose.go | 56 + x/query/client/cli/query_can_validate.go | 50 + x/query/client/cli/query_can_vote.go | 53 + .../client/cli/query_current_vote_status.go | 49 + .../client/cli/query_delegation_delegator.go | 47 + .../query_delegation_delegators_by_staker.go | 52 + .../query_delegation_stakers_by_delegator.go | 52 + x/query/client/cli/query_finalized_bundles.go | 94 + x/query/client/cli/query_pool.go | 96 + x/query/client/cli/query_staker.go | 78 + x/query/client/cli/query_stakers_by_pool.go | 47 + x/query/keeper/grpc_account_asssets.go | 82 + .../grpc_account_delegation_unbondings.go | 48 + x/query/keeper/grpc_account_funded.go | 40 + x/query/keeper/grpc_account_redelegation.go | 38 + .../keeper/grpc_account_redelegation_test.go | 149 + x/query/keeper/grpc_current_vote_status.go | 33 + x/query/keeper/grpc_delegation_delegator.go | 31 + .../grpc_delegation_delegators_by_staker.go | 56 + .../grpc_delegation_stakers_by_delegator.go | 49 + x/query/keeper/grpc_params.go | 36 + x/query/keeper/grpc_query.go | 14 + x/query/keeper/grpc_query_can_propose.go | 30 + x/query/keeper/grpc_query_can_propose_test.go | 556 ++ x/query/keeper/grpc_query_can_validate.go | 47 + .../keeper/grpc_query_can_validate_test.go | 133 + x/query/keeper/grpc_query_can_vote.go | 41 + x/query/keeper/grpc_query_can_vote_test.go | 500 + x/query/keeper/grpc_query_finalized_bundle.go | 53 + x/query/keeper/grpc_query_pool.go | 67 + x/query/keeper/grpc_query_staker.go | 72 + x/query/keeper/grpc_query_stakers_by_pool.go | 38 + .../grpc_query_stakers_by_pool_count.go | 31 + x/query/keeper/helper.go | 97 + x/query/keeper/keeper.go | 85 + x/query/keeper/keeper_suite_test.go | 13 + x/query/module.go | 152 + x/query/types/account.pb.go | 2827 ++++++ 
x/query/types/account.pb.gw.go | 510 + x/query/types/bundles.pb.go | 3584 +++++++ x/query/types/bundles.pb.gw.go | 1011 ++ x/query/types/codec.go | 11 + x/query/types/delegation.pb.go | 2382 +++++ x/query/types/delegation.pb.gw.go | 449 + x/query/types/expected_keepers.go | 18 + x/query/types/keys.go | 19 + x/query/types/params.pb.go | 799 ++ x/query/types/params.pb.gw.go | 153 + x/query/types/pools.pb.go | 1685 ++++ x/query/types/pools.pb.gw.go | 272 + x/query/types/query.pb.go | 2087 ++++ x/query/types/stakers.pb.go | 2286 +++++ x/query/types/stakers.pb.gw.go | 456 + x/query/types/types.go | 1 + x/stakers/client/cli/query.go | 31 + x/stakers/client/cli/query_params.go | 34 + x/stakers/client/cli/tx.go | 29 + x/stakers/client/cli/tx_join_pool.go | 53 + x/stakers/client/cli/tx_leave_pool.go | 44 + x/stakers/client/cli/tx_stake.go | 44 + x/stakers/client/cli/tx_update_commission.go | 38 + x/stakers/client/cli/tx_update_metadata.go | 40 + x/stakers/genesis.go | 54 + x/stakers/keeper/exported_functions.go | 132 + x/stakers/keeper/exported_functions_test.go | 176 + x/stakers/keeper/getters_commission.go | 77 + x/stakers/keeper/getters_leave.go | 88 + x/stakers/keeper/getters_params.go | 36 + x/stakers/keeper/getters_queue.go | 29 + x/stakers/keeper/getters_staker.go | 268 + x/stakers/keeper/getters_valaccount.go | 154 + x/stakers/keeper/grpc_query.go | 21 + x/stakers/keeper/keeper.go | 70 + x/stakers/keeper/keeper_suite_test.go | 16 + x/stakers/keeper/logic_commission.go | 60 + x/stakers/keeper/logic_leave.go | 47 + x/stakers/keeper/logic_queue.go | 45 + x/stakers/keeper/logic_stakers.go | 55 + x/stakers/keeper/msg_server.go | 17 + x/stakers/keeper/msg_server_create_staker.go | 48 + .../keeper/msg_server_create_staker_test.go | 193 + x/stakers/keeper/msg_server_join_pool.go | 87 + x/stakers/keeper/msg_server_join_pool_test.go | 865 ++ x/stakers/keeper/msg_server_leave_pool.go | 32 + .../keeper/msg_server_leave_pool_test.go | 303 + .../keeper/msg_server_update_commission.go | 37 + .../msg_server_update_commission_test.go | 250 + .../keeper/msg_server_update_metadata.go | 32 + .../keeper/msg_server_update_metadata_test.go | 131 + x/stakers/keeper/msg_server_update_params.go | 30 + .../keeper/msg_server_update_params_test.go | 333 + x/stakers/module.go | 161 + x/stakers/spec/01_concepts.md | 39 + x/stakers/spec/02_state.md | 140 + x/stakers/spec/03_messages.md | 44 + x/stakers/spec/04_end_block.md | 9 + x/stakers/spec/05_events.md | 108 + x/stakers/spec/06_params.md | 12 + x/stakers/spec/07_exported.md | 65 + x/stakers/types/codec.go | 23 + x/stakers/types/errors.go | 23 + x/stakers/types/events.pb.go | 1426 +++ x/stakers/types/expected_keepers.go | 39 + x/stakers/types/genesis.go | 69 + x/stakers/types/genesis.pb.go | 698 ++ x/stakers/types/keys.go | 99 + x/stakers/types/message_create_staker.go | 39 + x/stakers/types/message_join_pool.go | 39 + x/stakers/types/message_leavel_pool.go | 39 + x/stakers/types/message_update_commission.go | 39 + x/stakers/types/message_update_metadata.go | 52 + x/stakers/types/msgs.go | 35 + x/stakers/types/params.go | 43 + x/stakers/types/params.pb.go | 340 + x/stakers/types/query.pb.go | 536 ++ x/stakers/types/query.pb.gw.go | 153 + x/stakers/types/stakers.pb.go | 1670 ++++ x/stakers/types/tx.pb.go | 2658 ++++++ x/stakers/types/types.go | 1 + x/team/abci.go | 84 + x/team/client/cli/query.go | 25 + x/team/client/cli/tx.go | 28 + x/team/client/cli/tx_claim_account_rewards.go | 51 + .../client/cli/tx_claim_authority_rewards.go | 45 + 
x/team/client/cli/tx_claim_unlocked.go | 51 + x/team/client/cli/tx_clawback.go | 48 + .../cli/tx_create_team_vesting_account.go | 48 + x/team/genesis.go | 29 + x/team/keeper/abci_inflation_rewards_test.go | 118 + x/team/keeper/getters_team_vesting_account.go | 113 + x/team/keeper/grpc_query.go | 7 + .../grpc_query_vesting_status_by_time.go | 57 + x/team/keeper/grpc_team_info.go | 21 + x/team/keeper/grpc_team_vesting_account.go | 37 + x/team/keeper/grpc_team_vesting_status.go | 33 + x/team/keeper/keeper.go | 50 + x/team/keeper/keeper_test.go | 16 + x/team/keeper/logic_team.go | 161 + x/team/keeper/logic_team_test.go | 402 + x/team/keeper/msg_server.go | 15 + .../msg_server_claim_account_rewards.go | 47 + .../msg_server_claim_account_rewards_test.go | 180 + .../msg_server_claim_authority_rewards.go | 43 + ...msg_server_claim_authority_rewards_test.go | 108 + x/team/keeper/msg_server_claim_unlocked.go | 51 + .../keeper/msg_server_claim_unlocked_test.go | 223 + x/team/keeper/msg_server_clawback.go | 44 + x/team/keeper/msg_server_clawback_test.go | 434 + .../msg_server_create_team_vesting_account.go | 40 + ...server_create_team_vesting_account_test.go | 191 + x/team/module.go | 169 + x/team/spec/01_concepts.md | 55 + x/team/spec/02_state.md | 42 + x/team/spec/03_messages.md | 35 + x/team/spec/04_begin_block.md | 10 + x/team/spec/05_events.md | 74 + x/team/types/codec.go | 22 + x/team/types/errors.go | 10 + x/team/types/events.pb.go | 1314 +++ x/team/types/genesis.go | 31 + x/team/types/genesis.pb.go | 426 + x/team/types/keys.go | 51 + x/team/types/message_claim_account_rewards.go | 39 + .../types/message_claim_authority_rewards.go | 39 + x/team/types/message_claim_unlocked.go | 39 + x/team/types/message_clawback.go | 39 + .../message_create_team_vesting_account.go | 39 + x/team/types/query.pb.go | 3639 +++++++ x/team/types/query.pb.gw.go | 543 ++ x/team/types/team.pb.go | 757 ++ x/team/types/tx.pb.go | 2348 +++++ x/team/types/types.go | 74 + 482 files changed, 116935 insertions(+) create mode 100644 .github/CHANGELOG.md create mode 100644 .github/README.md create mode 100644 .github/emergency_1295379.md create mode 100644 .github/mergify.yml create mode 100644 .github/workflows/lint.yml create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 CodeStructure.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 app/ante.go create mode 100644 app/app.go create mode 100644 app/encoding.go create mode 100644 app/export.go create mode 100644 app/forks.go create mode 100644 app/genesis.go create mode 100644 app/keepers.go create mode 100644 app/modules.go create mode 100644 app/test_helpers.go create mode 100644 app/upgrades/v0_8_0/constants.go create mode 100644 app/upgrades/v0_8_0/store.go create mode 100644 app/upgrades/v0_8_0/upgrade.go create mode 100644 buf.work.yaml create mode 100644 cmd/kyved/app_creator.go create mode 100644 cmd/kyved/config.go create mode 100644 cmd/kyved/gen_accounts.go create mode 100644 cmd/kyved/main.go create mode 100644 cmd/kyved/root.go create mode 100644 config.yml create mode 100644 docs/config.json create mode 100644 docs/docs.go create mode 100644 docs/handler.go create mode 100644 docs/index.tpl create mode 100644 docs/swagger.yml create mode 100644 go.mod create mode 100644 go.sum create mode 100644 proto/Dockerfile create mode 100644 proto/buf.gen.yaml create mode 100644 proto/buf.lock create mode 100644 proto/buf.yaml create mode 100644 proto/generate.sh create mode 100644 proto/kyve/bundles/v1beta1/bundles.proto 
create mode 100644 proto/kyve/bundles/v1beta1/events.proto create mode 100644 proto/kyve/bundles/v1beta1/genesis.proto create mode 100644 proto/kyve/bundles/v1beta1/params.proto create mode 100644 proto/kyve/bundles/v1beta1/query.proto create mode 100644 proto/kyve/bundles/v1beta1/tx.proto create mode 100644 proto/kyve/delegation/v1beta1/delegation.proto create mode 100644 proto/kyve/delegation/v1beta1/events.proto create mode 100644 proto/kyve/delegation/v1beta1/genesis.proto create mode 100644 proto/kyve/delegation/v1beta1/params.proto create mode 100644 proto/kyve/delegation/v1beta1/query.proto create mode 100644 proto/kyve/delegation/v1beta1/tx.proto create mode 100644 proto/kyve/global/v1beta1/genesis.proto create mode 100644 proto/kyve/global/v1beta1/global.proto create mode 100644 proto/kyve/global/v1beta1/query.proto create mode 100644 proto/kyve/global/v1beta1/tx.proto create mode 100644 proto/kyve/pool/v1beta1/events.proto create mode 100644 proto/kyve/pool/v1beta1/genesis.proto create mode 100644 proto/kyve/pool/v1beta1/pool.proto create mode 100644 proto/kyve/pool/v1beta1/tx.proto create mode 100644 proto/kyve/query/v1beta1/account.proto create mode 100644 proto/kyve/query/v1beta1/bundles.proto create mode 100644 proto/kyve/query/v1beta1/delegation.proto create mode 100644 proto/kyve/query/v1beta1/params.proto create mode 100644 proto/kyve/query/v1beta1/pools.proto create mode 100644 proto/kyve/query/v1beta1/query.proto create mode 100644 proto/kyve/query/v1beta1/stakers.proto create mode 100644 proto/kyve/stakers/v1beta1/events.proto create mode 100644 proto/kyve/stakers/v1beta1/genesis.proto create mode 100644 proto/kyve/stakers/v1beta1/params.proto create mode 100644 proto/kyve/stakers/v1beta1/query.proto create mode 100644 proto/kyve/stakers/v1beta1/stakers.proto create mode 100644 proto/kyve/stakers/v1beta1/tx.proto create mode 100644 proto/kyve/team/v1beta1/events.proto create mode 100644 proto/kyve/team/v1beta1/genesis.proto create mode 100644 proto/kyve/team/v1beta1/query.proto create mode 100644 proto/kyve/team/v1beta1/team.proto create mode 100644 proto/kyve/team/v1beta1/tx.proto create mode 100644 testutil/integration/checks.go create mode 100644 testutil/integration/helpers.go create mode 100644 testutil/integration/integration.go create mode 100644 testutil/integration/transactions.go create mode 100644 testutil/keeper/bundles.go create mode 100644 testutil/keeper/pool.go create mode 100644 testutil/keeper/query.go create mode 100644 testutil/keeper/stakers.go create mode 100644 testutil/nullify/nullify.go create mode 100644 testutil/sample/sample.go create mode 100644 tools/tools.go create mode 100644 util/arrays.go create mode 100644 util/logic.go create mode 100644 util/logic_bank.go create mode 100644 util/logic_error.go create mode 100644 util/logic_math.go create mode 100644 util/validate.go create mode 100644 x/bundles/client/cli/query.go create mode 100644 x/bundles/client/cli/query_params.go create mode 100644 x/bundles/client/cli/tx.go create mode 100644 x/bundles/client/cli/tx_claim_uploader_role.go create mode 100644 x/bundles/client/cli/tx_skip_uploader_role.go create mode 100644 x/bundles/client/cli/tx_submit_bundle_proposal.go create mode 100644 x/bundles/client/cli/tx_vote_proposal.go create mode 100644 x/bundles/genesis.go create mode 100644 x/bundles/keeper/getters_bundles.go create mode 100644 x/bundles/keeper/getters_params.go create mode 100644 x/bundles/keeper/grpc_query.go create mode 100644 x/bundles/keeper/grpc_query_params.go create mode 
100644 x/bundles/keeper/keeper.go create mode 100644 x/bundles/keeper/keeper_suite_dropped_bundles_test.go create mode 100644 x/bundles/keeper/keeper_suite_funding_bundles_test.go create mode 100644 x/bundles/keeper/keeper_suite_invalid_bundles_test.go create mode 100644 x/bundles/keeper/keeper_suite_points_test.go create mode 100644 x/bundles/keeper/keeper_suite_stakers_leave_test.go create mode 100644 x/bundles/keeper/keeper_suite_test.go create mode 100644 x/bundles/keeper/keeper_suite_valid_bundles_test.go create mode 100644 x/bundles/keeper/keeper_suite_zero_delegation_test.go create mode 100644 x/bundles/keeper/logic_bundles.go create mode 100644 x/bundles/keeper/logic_bundles_test.go create mode 100644 x/bundles/keeper/logic_end_block_handle_upload_timeout.go create mode 100644 x/bundles/keeper/logic_end_block_handle_upload_timeout_test.go create mode 100644 x/bundles/keeper/msg_server.go create mode 100644 x/bundles/keeper/msg_server_claim_uploader_role.go create mode 100644 x/bundles/keeper/msg_server_claim_uploader_role_test.go create mode 100644 x/bundles/keeper/msg_server_skip_uploader_role.go create mode 100644 x/bundles/keeper/msg_server_skip_uploader_role_test.go create mode 100644 x/bundles/keeper/msg_server_submit_bundle_proposal.go create mode 100644 x/bundles/keeper/msg_server_submit_bundle_proposal_test.go create mode 100644 x/bundles/keeper/msg_server_update_params.go create mode 100644 x/bundles/keeper/msg_server_update_params_test.go create mode 100644 x/bundles/keeper/msg_server_vote_bundle_proposal.go create mode 100644 x/bundles/keeper/msg_server_vote_bundle_proposal_test.go create mode 100644 x/bundles/module.go create mode 100644 x/bundles/spec/01_concepts.md create mode 100644 x/bundles/spec/02_state.md create mode 100644 x/bundles/spec/03_messages.md create mode 100644 x/bundles/spec/04_end_block.md create mode 100644 x/bundles/spec/05_params.md create mode 100644 x/bundles/spec/06_events.md create mode 100644 x/bundles/spec/07_exported.md create mode 100644 x/bundles/types/bundles.pb.go create mode 100644 x/bundles/types/codec.go create mode 100644 x/bundles/types/errors.go create mode 100644 x/bundles/types/events.pb.go create mode 100644 x/bundles/types/expected_keepers.go create mode 100644 x/bundles/types/genesis.go create mode 100644 x/bundles/types/genesis.pb.go create mode 100644 x/bundles/types/keys.go create mode 100644 x/bundles/types/message_claim_uploader_role.go create mode 100644 x/bundles/types/message_skip_uploader_role.go create mode 100644 x/bundles/types/message_submit_bundle_proposal.go create mode 100644 x/bundles/types/message_vote_bundle_proposal.go create mode 100644 x/bundles/types/msgs.go create mode 100644 x/bundles/types/params.go create mode 100644 x/bundles/types/params.pb.go create mode 100644 x/bundles/types/query.pb.go create mode 100644 x/bundles/types/query.pb.gw.go create mode 100644 x/bundles/types/tx.pb.go create mode 100644 x/bundles/types/types.go create mode 100644 x/delegation/client/cli/query.go create mode 100644 x/delegation/client/cli/query_params.go create mode 100644 x/delegation/client/cli/tx.go create mode 100644 x/delegation/client/cli/tx_delegate.go create mode 100644 x/delegation/client/cli/tx_redelegate.go create mode 100644 x/delegation/client/cli/tx_undelegate.go create mode 100644 x/delegation/client/cli/tx_withdraw_rewards.go create mode 100644 x/delegation/genesis.go create mode 100644 x/delegation/keeper/exported_functions.go create mode 100644 x/delegation/keeper/getters_delegation_data.go create 
mode 100644 x/delegation/keeper/getters_delegation_entries.go create mode 100644 x/delegation/keeper/getters_delegation_slash.go create mode 100644 x/delegation/keeper/getters_delegator.go create mode 100644 x/delegation/keeper/getters_index_stakers.go create mode 100644 x/delegation/keeper/getters_params.go create mode 100644 x/delegation/keeper/getters_redelegation.go create mode 100644 x/delegation/keeper/getters_undelegation.go create mode 100644 x/delegation/keeper/grpc_query.go create mode 100644 x/delegation/keeper/keeper.go create mode 100644 x/delegation/keeper/keeper_suite_test.go create mode 100644 x/delegation/keeper/logic_delegation.go create mode 100644 x/delegation/keeper/logic_f1distribution.go create mode 100644 x/delegation/keeper/logic_redelegation.go create mode 100644 x/delegation/keeper/logic_unbonding.go create mode 100644 x/delegation/keeper/msg_server.go create mode 100644 x/delegation/keeper/msg_server_delegate.go create mode 100644 x/delegation/keeper/msg_server_delegate_test.go create mode 100644 x/delegation/keeper/msg_server_redelegate.go create mode 100644 x/delegation/keeper/msg_server_redelegate_test.go create mode 100644 x/delegation/keeper/msg_server_undelegate.go create mode 100644 x/delegation/keeper/msg_server_undelegate_test.go create mode 100644 x/delegation/keeper/msg_server_update_params.go create mode 100644 x/delegation/keeper/msg_server_update_params_test.go create mode 100644 x/delegation/keeper/msg_server_withdraw_rewards.go create mode 100644 x/delegation/keeper/msg_server_withdraw_rewards_test.go create mode 100644 x/delegation/module.go create mode 100644 x/delegation/spec/01_concepts.md create mode 100644 x/delegation/spec/02_state.md create mode 100644 x/delegation/spec/03_messages.md create mode 100644 x/delegation/spec/04_end_block.md create mode 100644 x/delegation/spec/05_events.md create mode 100644 x/delegation/spec/06_params.md create mode 100644 x/delegation/spec/07_exported.md create mode 100644 x/delegation/types/codec.go create mode 100644 x/delegation/types/delegation.pb.go create mode 100644 x/delegation/types/errors.go create mode 100644 x/delegation/types/events.pb.go create mode 100644 x/delegation/types/expected_keepers.go create mode 100644 x/delegation/types/genesis.go create mode 100644 x/delegation/types/genesis.pb.go create mode 100644 x/delegation/types/keys.go create mode 100644 x/delegation/types/message_delegate.go create mode 100644 x/delegation/types/message_redelegate.go create mode 100644 x/delegation/types/message_undelegate.go create mode 100644 x/delegation/types/message_withdraw_rewards.go create mode 100644 x/delegation/types/msgs.go create mode 100644 x/delegation/types/params.go create mode 100644 x/delegation/types/params.pb.go create mode 100644 x/delegation/types/query.pb.go create mode 100644 x/delegation/types/query.pb.gw.go create mode 100644 x/delegation/types/tx.pb.go create mode 100644 x/delegation/types/types.go create mode 100644 x/global/abci.go create mode 100644 x/global/abci_test.go create mode 100644 x/global/ante.go create mode 100644 x/global/ante_test.go create mode 100644 x/global/ante_utils_test.go create mode 100644 x/global/client/cli/query.go create mode 100644 x/global/client/cli/query_params.go create mode 100644 x/global/genesis.go create mode 100644 x/global/keeper/getters_params.go create mode 100644 x/global/keeper/grpc_query.go create mode 100644 x/global/keeper/grpc_query_params.go create mode 100644 x/global/keeper/keeper.go create mode 100644 
x/global/keeper/keeper_test.go create mode 100644 x/global/keeper/msg_server.go create mode 100644 x/global/keeper/msg_server_update_params.go create mode 100644 x/global/keeper/msg_server_update_params_test.go create mode 100644 x/global/module.go create mode 100644 x/global/module_test.go create mode 100644 x/global/post.go create mode 100644 x/global/post_test.go create mode 100644 x/global/types/codec.go create mode 100644 x/global/types/genesis.go create mode 100644 x/global/types/genesis.pb.go create mode 100644 x/global/types/global.pb.go create mode 100644 x/global/types/keys.go create mode 100644 x/global/types/msgs.go create mode 100644 x/global/types/params.go create mode 100644 x/global/types/query.pb.go create mode 100644 x/global/types/query.pb.gw.go create mode 100644 x/global/types/tx.pb.go create mode 100644 x/global/types/types.go create mode 100644 x/global/utils.go create mode 100644 x/pool/client/cli/tx.go create mode 100644 x/pool/client/cli/tx_defund_pool.go create mode 100644 x/pool/client/cli/tx_fund_pool.go create mode 100644 x/pool/genesis.go create mode 100644 x/pool/keeper/getters_pool.go create mode 100644 x/pool/keeper/keeper.go create mode 100644 x/pool/keeper/keeper_test.go create mode 100644 x/pool/keeper/keeper_utils_test.go create mode 100644 x/pool/keeper/logic_end_block_handle_pool_upgrades.go create mode 100644 x/pool/keeper/logic_end_block_handle_pool_upgrades_test.go create mode 100644 x/pool/keeper/logic_funders.go create mode 100644 x/pool/keeper/logic_funders_test.go create mode 100644 x/pool/keeper/logic_pool.go create mode 100644 x/pool/keeper/msg_server.go create mode 100644 x/pool/keeper/msg_server_cancel_runtime_upgrade.go create mode 100644 x/pool/keeper/msg_server_cancel_runtime_upgrade_test.go create mode 100644 x/pool/keeper/msg_server_create_pool.go create mode 100644 x/pool/keeper/msg_server_create_pool_test.go create mode 100644 x/pool/keeper/msg_server_defund_pool.go create mode 100644 x/pool/keeper/msg_server_defund_pool_test.go create mode 100644 x/pool/keeper/msg_server_disable_pool.go create mode 100644 x/pool/keeper/msg_server_disable_pool_test.go create mode 100644 x/pool/keeper/msg_server_enable_pool.go create mode 100644 x/pool/keeper/msg_server_enable_pool_test.go create mode 100644 x/pool/keeper/msg_server_fund_pool.go create mode 100644 x/pool/keeper/msg_server_fund_pool_test.go create mode 100644 x/pool/keeper/msg_server_schedule_runtime_upgrade.go create mode 100644 x/pool/keeper/msg_server_schedule_runtime_upgrade_test.go create mode 100644 x/pool/keeper/msg_server_update_pool.go create mode 100644 x/pool/keeper/msg_server_update_pool_test.go create mode 100644 x/pool/module.go create mode 100644 x/pool/spec/01_concepts.md create mode 100644 x/pool/spec/02_state.md create mode 100644 x/pool/spec/03_messages.md create mode 100644 x/pool/spec/04_end_block.md create mode 100644 x/pool/spec/05_params.md create mode 100644 x/pool/spec/06_events.md create mode 100644 x/pool/spec/07_exported.md create mode 100644 x/pool/types/codec.go create mode 100644 x/pool/types/errors.go create mode 100644 x/pool/types/events.pb.go create mode 100644 x/pool/types/expected_keepers.go create mode 100644 x/pool/types/genesis.go create mode 100644 x/pool/types/genesis.pb.go create mode 100644 x/pool/types/keys.go create mode 100644 x/pool/types/message_defund_pool.go create mode 100644 x/pool/types/message_fund_pool.go create mode 100644 x/pool/types/msg.go create mode 100644 x/pool/types/pool.go create mode 100644 x/pool/types/pool.pb.go 
create mode 100644 x/pool/types/tx.pb.go create mode 100644 x/pool/types/types.go create mode 100644 x/query/client/cli/query.go create mode 100644 x/query/client/cli/query_account_assets.go create mode 100644 x/query/client/cli/query_account_delegation_unbondings.go create mode 100644 x/query/client/cli/query_account_funded.go create mode 100644 x/query/client/cli/query_account_redelegation.go create mode 100644 x/query/client/cli/query_can_propose.go create mode 100644 x/query/client/cli/query_can_validate.go create mode 100644 x/query/client/cli/query_can_vote.go create mode 100644 x/query/client/cli/query_current_vote_status.go create mode 100644 x/query/client/cli/query_delegation_delegator.go create mode 100644 x/query/client/cli/query_delegation_delegators_by_staker.go create mode 100644 x/query/client/cli/query_delegation_stakers_by_delegator.go create mode 100644 x/query/client/cli/query_finalized_bundles.go create mode 100644 x/query/client/cli/query_pool.go create mode 100644 x/query/client/cli/query_staker.go create mode 100644 x/query/client/cli/query_stakers_by_pool.go create mode 100644 x/query/keeper/grpc_account_asssets.go create mode 100644 x/query/keeper/grpc_account_delegation_unbondings.go create mode 100644 x/query/keeper/grpc_account_funded.go create mode 100644 x/query/keeper/grpc_account_redelegation.go create mode 100644 x/query/keeper/grpc_account_redelegation_test.go create mode 100644 x/query/keeper/grpc_current_vote_status.go create mode 100644 x/query/keeper/grpc_delegation_delegator.go create mode 100644 x/query/keeper/grpc_delegation_delegators_by_staker.go create mode 100644 x/query/keeper/grpc_delegation_stakers_by_delegator.go create mode 100644 x/query/keeper/grpc_params.go create mode 100644 x/query/keeper/grpc_query.go create mode 100644 x/query/keeper/grpc_query_can_propose.go create mode 100644 x/query/keeper/grpc_query_can_propose_test.go create mode 100644 x/query/keeper/grpc_query_can_validate.go create mode 100644 x/query/keeper/grpc_query_can_validate_test.go create mode 100644 x/query/keeper/grpc_query_can_vote.go create mode 100644 x/query/keeper/grpc_query_can_vote_test.go create mode 100644 x/query/keeper/grpc_query_finalized_bundle.go create mode 100644 x/query/keeper/grpc_query_pool.go create mode 100644 x/query/keeper/grpc_query_staker.go create mode 100644 x/query/keeper/grpc_query_stakers_by_pool.go create mode 100644 x/query/keeper/grpc_query_stakers_by_pool_count.go create mode 100644 x/query/keeper/helper.go create mode 100644 x/query/keeper/keeper.go create mode 100644 x/query/keeper/keeper_suite_test.go create mode 100644 x/query/module.go create mode 100644 x/query/types/account.pb.go create mode 100644 x/query/types/account.pb.gw.go create mode 100644 x/query/types/bundles.pb.go create mode 100644 x/query/types/bundles.pb.gw.go create mode 100644 x/query/types/codec.go create mode 100644 x/query/types/delegation.pb.go create mode 100644 x/query/types/delegation.pb.gw.go create mode 100644 x/query/types/expected_keepers.go create mode 100644 x/query/types/keys.go create mode 100644 x/query/types/params.pb.go create mode 100644 x/query/types/params.pb.gw.go create mode 100644 x/query/types/pools.pb.go create mode 100644 x/query/types/pools.pb.gw.go create mode 100644 x/query/types/query.pb.go create mode 100644 x/query/types/stakers.pb.go create mode 100644 x/query/types/stakers.pb.gw.go create mode 100644 x/query/types/types.go create mode 100644 x/stakers/client/cli/query.go create mode 100644 
x/stakers/client/cli/query_params.go create mode 100644 x/stakers/client/cli/tx.go create mode 100644 x/stakers/client/cli/tx_join_pool.go create mode 100644 x/stakers/client/cli/tx_leave_pool.go create mode 100644 x/stakers/client/cli/tx_stake.go create mode 100644 x/stakers/client/cli/tx_update_commission.go create mode 100644 x/stakers/client/cli/tx_update_metadata.go create mode 100644 x/stakers/genesis.go create mode 100644 x/stakers/keeper/exported_functions.go create mode 100644 x/stakers/keeper/exported_functions_test.go create mode 100644 x/stakers/keeper/getters_commission.go create mode 100644 x/stakers/keeper/getters_leave.go create mode 100644 x/stakers/keeper/getters_params.go create mode 100644 x/stakers/keeper/getters_queue.go create mode 100644 x/stakers/keeper/getters_staker.go create mode 100644 x/stakers/keeper/getters_valaccount.go create mode 100644 x/stakers/keeper/grpc_query.go create mode 100644 x/stakers/keeper/keeper.go create mode 100644 x/stakers/keeper/keeper_suite_test.go create mode 100644 x/stakers/keeper/logic_commission.go create mode 100644 x/stakers/keeper/logic_leave.go create mode 100644 x/stakers/keeper/logic_queue.go create mode 100644 x/stakers/keeper/logic_stakers.go create mode 100644 x/stakers/keeper/msg_server.go create mode 100644 x/stakers/keeper/msg_server_create_staker.go create mode 100644 x/stakers/keeper/msg_server_create_staker_test.go create mode 100644 x/stakers/keeper/msg_server_join_pool.go create mode 100644 x/stakers/keeper/msg_server_join_pool_test.go create mode 100644 x/stakers/keeper/msg_server_leave_pool.go create mode 100644 x/stakers/keeper/msg_server_leave_pool_test.go create mode 100644 x/stakers/keeper/msg_server_update_commission.go create mode 100644 x/stakers/keeper/msg_server_update_commission_test.go create mode 100644 x/stakers/keeper/msg_server_update_metadata.go create mode 100644 x/stakers/keeper/msg_server_update_metadata_test.go create mode 100644 x/stakers/keeper/msg_server_update_params.go create mode 100644 x/stakers/keeper/msg_server_update_params_test.go create mode 100644 x/stakers/module.go create mode 100644 x/stakers/spec/01_concepts.md create mode 100644 x/stakers/spec/02_state.md create mode 100644 x/stakers/spec/03_messages.md create mode 100644 x/stakers/spec/04_end_block.md create mode 100644 x/stakers/spec/05_events.md create mode 100644 x/stakers/spec/06_params.md create mode 100644 x/stakers/spec/07_exported.md create mode 100644 x/stakers/types/codec.go create mode 100644 x/stakers/types/errors.go create mode 100644 x/stakers/types/events.pb.go create mode 100644 x/stakers/types/expected_keepers.go create mode 100644 x/stakers/types/genesis.go create mode 100644 x/stakers/types/genesis.pb.go create mode 100644 x/stakers/types/keys.go create mode 100644 x/stakers/types/message_create_staker.go create mode 100644 x/stakers/types/message_join_pool.go create mode 100644 x/stakers/types/message_leavel_pool.go create mode 100644 x/stakers/types/message_update_commission.go create mode 100644 x/stakers/types/message_update_metadata.go create mode 100644 x/stakers/types/msgs.go create mode 100644 x/stakers/types/params.go create mode 100644 x/stakers/types/params.pb.go create mode 100644 x/stakers/types/query.pb.go create mode 100644 x/stakers/types/query.pb.gw.go create mode 100644 x/stakers/types/stakers.pb.go create mode 100644 x/stakers/types/tx.pb.go create mode 100644 x/stakers/types/types.go create mode 100644 x/team/abci.go create mode 100644 x/team/client/cli/query.go create mode 100644 
x/team/client/cli/tx.go create mode 100644 x/team/client/cli/tx_claim_account_rewards.go create mode 100644 x/team/client/cli/tx_claim_authority_rewards.go create mode 100644 x/team/client/cli/tx_claim_unlocked.go create mode 100644 x/team/client/cli/tx_clawback.go create mode 100644 x/team/client/cli/tx_create_team_vesting_account.go create mode 100644 x/team/genesis.go create mode 100644 x/team/keeper/abci_inflation_rewards_test.go create mode 100644 x/team/keeper/getters_team_vesting_account.go create mode 100644 x/team/keeper/grpc_query.go create mode 100644 x/team/keeper/grpc_query_vesting_status_by_time.go create mode 100644 x/team/keeper/grpc_team_info.go create mode 100644 x/team/keeper/grpc_team_vesting_account.go create mode 100644 x/team/keeper/grpc_team_vesting_status.go create mode 100644 x/team/keeper/keeper.go create mode 100644 x/team/keeper/keeper_test.go create mode 100644 x/team/keeper/logic_team.go create mode 100644 x/team/keeper/logic_team_test.go create mode 100644 x/team/keeper/msg_server.go create mode 100644 x/team/keeper/msg_server_claim_account_rewards.go create mode 100644 x/team/keeper/msg_server_claim_account_rewards_test.go create mode 100644 x/team/keeper/msg_server_claim_authority_rewards.go create mode 100644 x/team/keeper/msg_server_claim_authority_rewards_test.go create mode 100644 x/team/keeper/msg_server_claim_unlocked.go create mode 100644 x/team/keeper/msg_server_claim_unlocked_test.go create mode 100644 x/team/keeper/msg_server_clawback.go create mode 100644 x/team/keeper/msg_server_clawback_test.go create mode 100644 x/team/keeper/msg_server_create_team_vesting_account.go create mode 100644 x/team/keeper/msg_server_create_team_vesting_account_test.go create mode 100644 x/team/module.go create mode 100644 x/team/spec/01_concepts.md create mode 100644 x/team/spec/02_state.md create mode 100644 x/team/spec/03_messages.md create mode 100644 x/team/spec/04_begin_block.md create mode 100644 x/team/spec/05_events.md create mode 100644 x/team/types/codec.go create mode 100644 x/team/types/errors.go create mode 100644 x/team/types/events.pb.go create mode 100644 x/team/types/genesis.go create mode 100644 x/team/types/genesis.pb.go create mode 100644 x/team/types/keys.go create mode 100644 x/team/types/message_claim_account_rewards.go create mode 100644 x/team/types/message_claim_authority_rewards.go create mode 100644 x/team/types/message_claim_unlocked.go create mode 100644 x/team/types/message_clawback.go create mode 100644 x/team/types/message_create_team_vesting_account.go create mode 100644 x/team/types/query.pb.go create mode 100644 x/team/types/query.pb.gw.go create mode 100644 x/team/types/team.pb.go create mode 100644 x/team/types/tx.pb.go create mode 100644 x/team/types/types.go diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md new file mode 100644 index 00000000..a4db3477 --- /dev/null +++ b/.github/CHANGELOG.md @@ -0,0 +1,73 @@ + + +# Changelog + +## Unreleased + +### Features + +- Unbonding time for unstaking from a pool. Protocol node runners have to keep their node running during the unbonding. +- Unbonding time for undelegating from a staker in a pool. The unbonding is performed immediately but the delegator has + to wait until the tokens are transferred back. + +- Switch to our custom fork of the Cosmos SDK. This includes the following: + - Stakers and delegators in the KYVE protocol can now participate in governance. + - Proposals can now be expedited, in the case of any emergency actions that need to be taken. 
+ - Different proposals can have different voting periods, depending on the proposal type. + +### Improvements + +- Bump [Cosmos SDK](https://github.com/cosmos/cosmos-sdk) to [`v0.45.5`](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.45.5). See [CHANGELOG](https://github.com/cosmos/cosmos-sdk/blob/v0.45.5/CHANGELOG.md#v0455---2022-06-09) for more details. +- Bump [IBC](https://github.com/cosmos/ibc-go) to [`v3.1.0`](https://github.com/cosmos/ibc-go/releases/tag/v3.1.0). See [CHANGELOG](https://github.com/cosmos/ibc-go/blob/v3.1.0/CHANGELOG.md#v310---2022-04-16) for more details. + +### Client Breaking Changes + +- Switch vote type in `MsgVoteProposal` from `uint64` to `enum`. +- Events from the `x/registry` module are now fully typed. + +## [v0.4.0](https://github.com/KYVENetwork/chain/releases/tag/v0.4.0) - 2022-06-07 + +### Features + +- Implemented scheduled upgrades for pool versions +- Implemented `abstain` vote besides `valid` and `invalid`. Validators who fail to vote 5 times in a row are removed with a timeout slash + +### Client Breaking Changes + +- The arg `vote` on `MsgVoteProposal` changed from `bool` to `uint64`. 0 = valid, 1 = invalid, 2 = abstain +- The arg `versions` on `MsgCreatePoolProposal` changed to `version` +- The arg `binaries` was added to `MsgCreatePoolProposal` + +### Improvements + +- Check the quorum of the bundle proposal on chain to prevent unjustified slashes +- Don't drop bundle proposals if one funder can't afford the funding cost; instead, remove all of them and proceed +- If a validator submits a `NO_DATA_BUNDLE`, they will just skip the upload instead of proposing an empty bundle +- Added query `QueryFunder` +- Added query `QueryStaker` +- Added query `QueryDelegator` + +### Bug Fixes + +### Deprecated + +- Deprecated `versions` on `kyve.registry.v1beta1.Pool`
diff --git a/.github/README.md b/.github/README.md new file mode 100644 index 00000000..9b9b53f5 --- /dev/null +++ b/.github/README.md @@ -0,0 +1,33 @@ +# The KYVE Chain + +###### v0.5.3 + +The chain nodes are the backbone of KYVE. The chain layer is a completely sovereign +[Proof of Stake](https://en.wikipedia.org/wiki/Proof_of_stake) blockchain built with +[Cosmos SDK](https://github.com/cosmos/cosmos-sdk) using the [Ignite CLI](https://ignt.com/cli). This blockchain is run +by independent nodes we call _Chain Nodes_ since they're running on the chain level. The native currency of the KYVE +chain is [$KYVE](https://docs.kyve.network/basics/kyve.html); it secures the chain and allows chain nodes to stake and +other users to delegate to them. + +--- + +## Building from source + +To build from source, the [Ignite CLI](https://ignt.com/cli) is required. + +```sh +ignite chain build --release --release.prefix kyve +``` + +The output can be found in `./release`. + +If you need to build for different architectures, use the `-t` flag, e.g. `-t linux:amd64,linux:arm64`. + +## Running a chain node + +Full documentation for setting up a chain node is provided [here](https://docs.kyve.network/getting-started/chain-node.html). + + +### Emergency_1295379 +On block #1295379 the chain halts and an emergency upgrade is required. +This upgrade is not handled by cosmovisor.
To manually perform the upgrade visit [Emergency_1295379.md](emergency_1295379.md) \ No newline at end of file
diff --git a/.github/emergency_1295379.md b/.github/emergency_1295379.md new file mode 100644 index 00000000..bd80ddf6 --- /dev/null +++ b/.github/emergency_1295379.md @@ -0,0 +1,53 @@ +## Emergency_1295379 + +**This guide assumes you are running the chain using cosmovisor as explained [here](https://docs.kyve.network/getting-started/chain-node.html).** + +On block #1295379 an error occurred in the end_block_logic, which caused the chain to halt. +To recover from this error, an emergency fix is required. +To apply it, execute the following commands. + +Stop the current chain binary. If you are running it as a systemd service, run +```shell +sudo systemctl stop kyved +``` + +Download the patch binary manually and prepare cosmovisor. +```shell +mkdir -p ~/.kyve/cosmovisor/upgrades/emergency_1295379/bin +cd ~/.kyve/cosmovisor/upgrades/emergency_1295379 +echo '{"name":"emergency_1295381","info":""}' > upgrade-info.json +cd bin +wget https://github.com/KYVENetwork/chain/releases/download/v0.5.3/chain_linux_amd64.tar.gz +tar -xvzf chain_linux_amd64.tar.gz +``` +Check that the sha256 sum is correct: +``` +echo "1d93f530e438da9459b79c67a3ea7423aad7b0e814154eb310685500fdb8a758 chain_linux_amd64.tar.gz" | sha256sum -c +``` + +If you run into disk-space issues, disable cosmovisor's backup creation. +Add +```sh +# This line is optional +Environment="UNSAFE_SKIP_BACKUP=true" +``` +to the other environment variables in `/etc/systemd/system/kyved.service` and reload the service: +```shell +sudo systemctl daemon-reload +``` +Remember to remove this line once the upgrade has been processed if you want to keep the backup option enabled. + +Then start cosmovisor: +```shell +sudo systemctl start kyved +``` +Watch the log with +```shell +sudo journalctl -u kyved -f +``` +and see if the upgrade passes successfully (i.e. the chain does not crash). + +We will wait until `5th June 2022 - 12:00 UTC` before starting the validators again, to give everybody time to perform the upgrade. + + +
diff --git a/.github/mergify.yml b/.github/mergify.yml new file mode 100644 index 00000000..2148caf3 --- /dev/null +++ b/.github/mergify.yml @@ -0,0 +1,16 @@ +defaults: + actions: + backport: + # By default, all backport PRs are assigned to the original author. + assignees: + - "{{ author }}" + +pull_request_rules: + - name: backport patches to v1.0.x branch + conditions: + - base=main + - label=backport/v1.0.x + actions: + backport: + branches: + - release/v1.0.x
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..accc8d20 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,29 @@ +on: push + +jobs: + buf: + runs-on: ubuntu-latest + steps: + # Run `git checkout` + - uses: actions/checkout@v3 + # Install `buf` + - uses: bufbuild/buf-setup-action@v1 + with: + github_token: ${{ github.token }} + # Lint Protobuf files + - uses: bufbuild/buf-lint-action@v1 + with: + buf_token: ${{ secrets.BUF_TOKEN }} + +# TODO(@john): Figure out why linting passes locally but not here.
+# golangci: +# runs-on: ubuntu-latest +# steps: +# # Run `git checkout` +# - uses: actions/checkout@v3 +# # Install `go` +# - uses: actions/setup-go@v3 +# # Lint Go files +# - uses: golangci/golangci-lint-action@v3 +# with: +# args: --timeout=10m
diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..2dbecbe0 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +.idea +.vscode +node_modules +release +.DS_Store +/scripts/ +test/.env +chain + +build
diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..406f0c48 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,30 @@ +# CHANGELOG + +## v0.7.0_beta7 + +- refactor: refactored custom keys by renaming `height` to `index` and adding both properties `from_key` and `to_key` to the bundle proposal +- refactor: renamed `current_height` to `current_index` and `current_value` to `current_summary` on pool +- refactor: removed `from_height` and `to_height` from the bundle proposal and instead added `from_index` and `bundle_size` to indicate the data range of the bundle more clearly +- refactor: renamed `bundle_hash` to `data_hash` on the bundle proposal to make it clear that the raw compressed data, as it lies on the storage provider, is what gets hashed +- refactor: renamed `byte_size` to `data_size` on bundle proposal +- refactor: refactored the bundle value by renaming `to_value` to `bundle_summary` and allowing protocol nodes to submit an entire bundle summary on-chain instead of just a single value +- feat: added and implemented event `EventPointIncreased` +- feat: added and implemented event `EventPointsReset` +- fix: implemented unused event `EventSlash` +- fix: an error is now thrown if a staker joins with a valaddress that is already used by another staker in the same pool + +## v0.7.0_beta8 +- refactor: added the `ar://` prefix to every Arweave tx for pool logos +- feat: pool config is now stored externally on Arweave or IPFS +- feat: `storageProviderId` and `compressionId` were introduced to pools to enable dynamic storage provider and compression switching +- Refactor Events: + - Emit ClaimedUploaderRole-event + - EventDelegate: `node` -> `staker` + - EventUndelegate: `node` -> `staker` + - EventRedelegate: `from_node` -> `from_staker`, `to_node` -> `to_staker` + - EventWithdrawRewards: `from_node` -> `staker` + - EventCreateStaker: `address` -> `staker` + - EventUpdateMetadata: `address` -> `staker` + - EventSlash: `address` -> `staker` + - EventUpdateCommission: `address` -> `staker` +- Emit `LeavePoolEvent` if a staker gets kicked out of a pool
diff --git a/CodeStructure.md b/CodeStructure.md new file mode 100644 index 00000000..45389f2d --- /dev/null +++ b/CodeStructure.md @@ -0,0 +1,18 @@ +# Code Structure + +### Getters +Getters belong to the keeper directory. Getter files are prefixed with +`getters_`. All methods there always succeed; in the worst case, nothing happens. +These methods never throw an error. This is the only place where methods are allowed +to write to the KV-Store. Also, all aggregation variables are updated here. + + +### Logic-Files +These files are prefixed with `logic_` and handle complex tasks. +They are allowed and encouraged to emit events and to call the getter functions. +All business logic happens here. + + +### Msg-Server +These files handle transactions at a high level. As much logic as possible should be forwarded +to the logic files. Msg-server files should always be easy to read.
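To make the layering described in `CodeStructure.md` concrete, here is a minimal, self-contained sketch of how a getter, a logic function, and a msg-server handler might relate. It is not part of this patch: the `Keeper`, `FundPool`, and `MsgFundPool` names are hypothetical, and a plain map stands in for the SDK KV-Store.

```go
package example

import (
	"errors"
	"fmt"
)

// Keeper holds the module state; the map stands in for the SDK KV-Store.
type Keeper struct {
	pools  map[uint64]uint64 // pool id -> total funds
	events []string
}

func NewKeeper() *Keeper {
	return &Keeper{pools: map[uint64]uint64{}}
}

// getters_*.go style: getters/setters never fail and are the only place that
// writes to the store.
func (k *Keeper) GetPoolFunds(poolId uint64) uint64 { return k.pools[poolId] }

func (k *Keeper) SetPoolFunds(poolId, amount uint64) { k.pools[poolId] = amount }

// logic_*.go style: business rules live here; logic calls getters and emits events.
func (k *Keeper) FundPool(poolId, amount uint64) error {
	if amount == 0 {
		return errors.New("amount must be positive")
	}
	k.SetPoolFunds(poolId, k.GetPoolFunds(poolId)+amount)
	k.events = append(k.events, fmt.Sprintf("EventFundPool{pool_id: %d, amount: %d}", poolId, amount))
	return nil
}

// msg_server_*.go style: a thin handler that only unpacks the message and
// forwards to the logic layer.
type MsgFundPool struct {
	Creator string
	PoolId  uint64
	Amount  uint64
}

func (k *Keeper) FundPoolHandler(msg MsgFundPool) error {
	return k.FundPool(msg.PoolId, msg.Amount)
}
```

In the real modules the getter layer wraps the SDK KV-Store and the msg server receives `sdk.Context`-bound requests, but the division of responsibilities is the same.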
diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..f1e5c194 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2023 BCP Innovations UG (haftungsbeschränkt) + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..823e8a93 --- /dev/null +++ b/Makefile @@ -0,0 +1,133 @@ +COMMIT := $(shell git log -1 --format='%H') +DENOM := tkyve +VERSION := 1.0.0-rc0 # $(shell echo $(shell git describe --tags) | sed 's/^v//') + +# Team Module +TEAM_AUTHORITY_ADDRESS := kyve1vut528et85755xsncjwl6dx8xakuv26hxgyv0n +TEAM_ALLOCATION := 165000000000000 +TEAM_TGE := 2023-02-07T14:00:00 + +ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=kyve \ + -X github.com/cosmos/cosmos-sdk/version.AppName=kyved \ + -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ + -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ + -X github.com/KYVENetwork/chain/x/global/types.Denom=$(DENOM) \ + -X github.com/KYVENetwork/chain/x/team/types.TEAM_AUTHORITY_STRING=$(TEAM_AUTHORITY_ADDRESS) \ + -X github.com/KYVENetwork/chain/x/team/types.TEAM_ALLOCATION_STRING=$(TEAM_ALLOCATION) \ + -X github.com/KYVENetwork/chain/x/team/types.TGE_STRING=$(TEAM_TGE) + +# TODO(@john): Are the missing flags needed? +BUILD_FLAGS := -ldflags '$(ldflags)' -tags 'ledger' -trimpath + +.PHONY: proto-setup proto-format proto-lint proto-gen \ + format lint vet test build release dev +all: proto-all format lint test build + +############################################################################### +### Build ### +############################################################################### + +build: + @echo "🤖 Building kyved..." + @go build $(BUILD_FLAGS) -o "$(PWD)/build/" ./cmd/kyved + @echo "✅ Completed build!" + +install: + @echo "🤖 Installing kyved..." + @go install -mod=readonly $(BUILD_FLAGS) ./cmd/kyved + @echo "✅ Completed installation!" + +release: + @echo "🤖 Creating kyved releases..." 
+ @rm -rf release + @mkdir -p release + + @GOOS=darwin GOARCH=amd64 go build $(BUILD_FLAGS) ./cmd/kyved + @tar -czf release/kyved_darwin_amd64.tar.gz kyved + @sha256sum release/kyved_darwin_amd64.tar.gz >> release/release_checksum + + @GOOS=darwin GOARCH=arm64 go build $(BUILD_FLAGS) ./cmd/kyved + @tar -czf release/kyved_darwin_arm64.tar.gz kyved + @sha256sum release/kyved_darwin_arm64.tar.gz >> release/release_checksum + + @GOOS=linux GOARCH=amd64 go build $(BUILD_FLAGS) ./cmd/kyved + @tar -czf release/kyved_linux_amd64.tar.gz kyved + @sha256sum release/kyved_linux_amd64.tar.gz >> release/release_checksum + + @GOOS=linux GOARCH=arm64 go build $(BUILD_FLAGS) ./cmd/kyved + @tar -czf release/kyved_linux_arm64.tar.gz kyved + @sha256sum release/kyved_linux_arm64.tar.gz >> release/release_checksum + + @rm kyved + @echo "✅ Completed release creation!" + +############################################################################### +### Development ### +############################################################################### + +# TODO(@john): Switch to the Docker image? +dev: + @ignite chain serve --reset-once --skip-proto --verbose + +############################################################################### +### Formatting & Linting ### +############################################################################### + +gofumpt_cmd=mvdan.cc/gofumpt +golangci_lint_cmd=github.com/golangci/golangci-lint/cmd/golangci-lint + +format: + @echo "🤖 Running formatter..." + @go run $(gofumpt_cmd) -l -w . + @echo "✅ Completed formatting!" + +lint: + @echo "🤖 Running linter..." + @go run $(golangci_lint_cmd) run --skip-dirs scripts --timeout=10m + @echo "✅ Completed linting!" + +# TODO(@john): Can we remove this since we use GolangCI? +vet: + @echo "🤖 Running vet..." + @go vet ./... + @echo "✅ Completed vet!" + +############################################################################### +### Protobuf ### +############################################################################### + +BUF_VERSION=1.13.1 + +proto-all: proto-format proto-lint proto-gen + +proto-format: + @echo "🤖 Running protobuf formatter..." + @docker run --volume "$(PWD)":/workspace --workdir /workspace \ + bufbuild/buf:$(BUF_VERSION) format --diff --write + @echo "✅ Completed protobuf formatting!" + +proto-gen: + @echo "🤖 Generating code from protobuf..." + @docker run --rm --volume "$(PWD)":/workspace --workdir /workspace \ + kyve-proto sh ./proto/generate.sh + @echo "✅ Completed code generation!" + +proto-lint: + @echo "🤖 Running protobuf linter..." + @docker run --volume "$(PWD)":/workspace --workdir /workspace \ + bufbuild/buf:$(BUF_VERSION) lint + @echo "✅ Completed protobuf linting!" + +proto-setup: + @echo "🤖 Setting up protobuf environment..." + @docker build --rm --tag kyve-proto:latest --file proto/Dockerfile . + @echo "✅ Setup protobuf environment!" + +############################################################################### +### Tests & Simulation ### +############################################################################### + +test: + @echo "🤖 Running tests..." + @go test -cover -mod=readonly ./x/... + @echo "✅ Completed tests!" 
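Because `-ldflags "-X ..."` can only inject plain strings, values such as the team allocation and the TGE timestamp pinned at the top of the Makefile have to be re-parsed inside the binary. The snippet below is a minimal sketch of that pattern under that assumption; the variable names mirror the ldflags targets in this Makefile, but it is not the actual `x/team/types` implementation.

```go
package types

import (
	"strconv"
	"time"
)

// These package-level strings are what the Makefile's `-ldflags "-X ..."`
// flags overwrite at build time (linker injection only works on strings).
// The names mirror the ldflags targets; the defaults here are placeholders.
var (
	TEAM_ALLOCATION_STRING = "0"
	TGE_STRING             = "1970-01-01T00:00:00"
)

// Typed values derived from the injected strings (illustrative only).
var (
	TEAM_ALLOCATION uint64
	TGE             time.Time
)

func init() {
	allocation, err := strconv.ParseUint(TEAM_ALLOCATION_STRING, 10, 64)
	if err != nil {
		panic(err)
	}
	TEAM_ALLOCATION = allocation

	tge, err := time.Parse("2006-01-02T15:04:05", TGE_STRING)
	if err != nil {
		panic(err)
	}
	TGE = tge
}
```

With this pattern, `make build` or `make install` is intended to produce a binary whose team-module constants match the values declared at the top of the Makefile.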
diff --git a/app/ante.go b/app/ante.go new file mode 100644 index 00000000..c1468443 --- /dev/null +++ b/app/ante.go @@ -0,0 +1,80 @@ +package app + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + // Auth + "github.com/cosmos/cosmos-sdk/x/auth/ante" + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + "github.com/cosmos/cosmos-sdk/x/auth/signing" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // FeeGrant + feeGrantKeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + // Global + "github.com/KYVENetwork/chain/x/global" + globalKeeper "github.com/KYVENetwork/chain/x/global/keeper" + // Gov + govKeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + // IBC + ibcAnte "github.com/cosmos/ibc-go/v5/modules/core/ante" + ibcKeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" + // Staking + stakingKeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" +) + +// https://github.com/cosmos/cosmos-sdk/blob/release/v0.46.x/x/auth/ante/ante.go#L25 + +func NewAnteHandler( + accountKeeper authKeeper.AccountKeeper, + bankKeeper bankKeeper.Keeper, + feeGrantKeeper feeGrantKeeper.Keeper, + globalKeeper globalKeeper.Keeper, + govKeeper govKeeper.Keeper, + ibcKeeper *ibcKeeper.Keeper, + stakingKeeper stakingKeeper.Keeper, + sigGasConsumer ante.SignatureVerificationGasConsumer, + signModeHandler signing.SignModeHandler, +) (sdk.AnteHandler, error) { + deductFeeDecorator := global.NewDeductFeeDecorator(accountKeeper, bankKeeper, feeGrantKeeper, globalKeeper, stakingKeeper) + + gasAdjustmentDecorator := global.NewGasAdjustmentDecorator(globalKeeper) + + initialDepositDecorator := global.NewInitialDepositDecorator(globalKeeper, govKeeper) + + anteDecorators := []sdk.AnteDecorator{ + ante.NewSetUpContextDecorator(), // outermost AnteDecorator. 
SetUpContext must be called first + gasAdjustmentDecorator, + ante.NewExtensionOptionsDecorator(nil), + ante.NewValidateBasicDecorator(), + ante.NewTxTimeoutHeightDecorator(), + ante.NewValidateMemoDecorator(accountKeeper), + ante.NewConsumeGasForTxSizeDecorator(accountKeeper), + deductFeeDecorator, + ante.NewSetPubKeyDecorator(accountKeeper), // SetPubKeyDecorator must be called before all signature verification decorators + ante.NewValidateSigCountDecorator(accountKeeper), + ante.NewSigGasConsumeDecorator(accountKeeper, sigGasConsumer), + ante.NewSigVerificationDecorator(accountKeeper, signModeHandler), + ante.NewIncrementSequenceDecorator(accountKeeper), + ibcAnte.NewRedundantRelayDecorator(ibcKeeper), + initialDepositDecorator, + } + + return sdk.ChainAnteDecorators(anteDecorators...), nil +} + +// + +func NewPostHandler( + bankKeeper bankKeeper.Keeper, + feeGrantKeeper feeGrantKeeper.Keeper, + globalKeeper globalKeeper.Keeper, +) (sdk.AnteHandler, error) { + refundFeeDecorator := global.NewRefundFeeDecorator(bankKeeper, feeGrantKeeper, globalKeeper) + + postDecorators := []sdk.AnteDecorator{ + refundFeeDecorator, + } + + return sdk.ChainAnteDecorators(postDecorators...), nil +} diff --git a/app/app.go b/app/app.go new file mode 100644 index 00000000..6052663a --- /dev/null +++ b/app/app.go @@ -0,0 +1,909 @@ +package app + +import ( + "fmt" + "io" + "net/http" + "os" + "path/filepath" + + v070 "github.com/KYVENetwork/chain/app/upgrades/v0_8_0" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/server/api" + "github.com/cosmos/cosmos-sdk/server/config" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/auth" + "github.com/cosmos/cosmos-sdk/x/auth/ante" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/auth/vesting" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + authzTypes "github.com/cosmos/cosmos-sdk/x/authz" + authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" + authz "github.com/cosmos/cosmos-sdk/x/authz/module" + "github.com/cosmos/cosmos-sdk/x/bank" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/capability" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/crisis" + crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + "github.com/cosmos/cosmos-sdk/x/distribution" + distrclient "github.com/cosmos/cosmos-sdk/x/distribution/client" + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/cosmos/cosmos-sdk/x/evidence" + evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + "github.com/cosmos/cosmos-sdk/x/feegrant" + feegrantkeeper 
"github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + feeGrant "github.com/cosmos/cosmos-sdk/x/feegrant/module" + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/cosmos/cosmos-sdk/x/gov" + govclient "github.com/cosmos/cosmos-sdk/x/gov/client" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + groupTypes "github.com/cosmos/cosmos-sdk/x/group" + groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" + group "github.com/cosmos/cosmos-sdk/x/group/module" + "github.com/cosmos/cosmos-sdk/x/mint" + mintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + "github.com/cosmos/cosmos-sdk/x/params" + paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + "github.com/cosmos/cosmos-sdk/x/slashing" + slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/staking" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/cosmos/cosmos-sdk/x/upgrade" + upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + ica "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts" + icaHost "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host" + icahostkeeper "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/keeper" + icaHostTypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/types" + icatypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/types" + ibcFee "github.com/cosmos/ibc-go/v5/modules/apps/29-fee" + ibcFeeKeeper "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/keeper" + ibcFeeTypes "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/types" + ibcTransfer "github.com/cosmos/ibc-go/v5/modules/apps/transfer" + ibctransferkeeper "github.com/cosmos/ibc-go/v5/modules/apps/transfer/keeper" + ibcTransferTypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + ibc "github.com/cosmos/ibc-go/v5/modules/core" + ibcclient "github.com/cosmos/ibc-go/v5/modules/core/02-client" + ibcclientclient "github.com/cosmos/ibc-go/v5/modules/core/02-client/client" + ibcclienttypes "github.com/cosmos/ibc-go/v5/modules/core/02-client/types" + ibcPortTypes "github.com/cosmos/ibc-go/v5/modules/core/05-port/types" + ibchost "github.com/cosmos/ibc-go/v5/modules/core/24-host" + ibckeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" + "github.com/spf13/cast" + abci "github.com/tendermint/tendermint/abci/types" + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + dbm "github.com/tendermint/tm-db" + + kyveDocs "github.com/KYVENetwork/chain/docs" + // this line is used by starport scaffolding # stargate/app/moduleImport + + // Bundles + "github.com/KYVENetwork/chain/x/bundles" + bundlesKeeper "github.com/KYVENetwork/chain/x/bundles/keeper" + bundlesTypes "github.com/KYVENetwork/chain/x/bundles/types" + // Delegation + 
"github.com/KYVENetwork/chain/x/delegation" + delegationKeeper "github.com/KYVENetwork/chain/x/delegation/keeper" + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + // Global + "github.com/KYVENetwork/chain/x/global" + globalKeeper "github.com/KYVENetwork/chain/x/global/keeper" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + // Pool + "github.com/KYVENetwork/chain/x/pool" + poolKeeper "github.com/KYVENetwork/chain/x/pool/keeper" + poolTypes "github.com/KYVENetwork/chain/x/pool/types" + // Query + "github.com/KYVENetwork/chain/x/query" + queryKeeper "github.com/KYVENetwork/chain/x/query/keeper" + queryTypes "github.com/KYVENetwork/chain/x/query/types" + // Stakers + "github.com/KYVENetwork/chain/x/stakers" + stakersKeeper "github.com/KYVENetwork/chain/x/stakers/keeper" + stakersTypes "github.com/KYVENetwork/chain/x/stakers/types" + // Team + "github.com/KYVENetwork/chain/x/team" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + teamTypes "github.com/KYVENetwork/chain/x/team/types" +) + +const ( + AccountAddressPrefix = "kyve" + Name = "kyve" +) + +var upgradeInfo upgradetypes.Plan + +func getGovProposalHandlers() []govclient.ProposalHandler { + var govProposalHandlers []govclient.ProposalHandler + + govProposalHandlers = append(govProposalHandlers, + paramsclient.ProposalHandler, + distrclient.ProposalHandler, + upgradeclient.LegacyProposalHandler, + upgradeclient.LegacyCancelProposalHandler, + ibcclientclient.UpdateClientProposalHandler, + ibcclientclient.UpgradeProposalHandler, + ) + + return govProposalHandlers +} + +var ( + // DefaultNodeHome default home directories for the application daemon + DefaultNodeHome string + + // ModuleBasics defines the module BasicManager is in charge of setting up basic, + // non-dependant module elements, such as codec registration + // and genesis verification. + ModuleBasics = module.NewBasicManager(appModuleBasics...) + + // module account permissions + maccPerms = moduleAccountPermissions +) + +var _ servertypes.Application = (*App)(nil) + +func init() { + userHomeDir, err := os.UserHomeDir() + if err != nil { + panic(err) + } + + DefaultNodeHome = filepath.Join(userHomeDir, "."+Name) +} + +// App extends an ABCI application, but with most of its parameters exported. +// They are exported for convenience in creating helper functions, as object +// capabilities aren't needed for testing. +type App struct { + *baseapp.BaseApp + + Keepers + + cdc *codec.LegacyAmino + appCodec codec.Codec + interfaceRegistry types.InterfaceRegistry + + invCheckPeriod uint + + // keys to access the substores + keys map[string]*storetypes.KVStoreKey + tkeys map[string]*storetypes.TransientStoreKey + memKeys map[string]*storetypes.MemoryStoreKey + + // mm is the module manager + mm *module.Manager + + configurator module.Configurator +} + +// NewKYVEApp returns a reference to an initialized blockchain app +func NewKYVEApp( + logger log.Logger, + db dbm.DB, + traceStore io.Writer, + loadLatest bool, + skipUpgradeHeights map[int64]bool, + homePath string, + invCheckPeriod uint, + encodingConfig EncodingConfig, + appOpts servertypes.AppOptions, + baseAppOptions ...func(*baseapp.BaseApp), +) *App { + appCodec := encodingConfig.Marshaler + cdc := encodingConfig.Amino + interfaceRegistry := encodingConfig.InterfaceRegistry + + bApp := baseapp.NewBaseApp(Name, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...) 
+ bApp.SetCommitMultiStoreTracer(traceStore) + bApp.SetVersion(version.Version) + bApp.SetInterfaceRegistry(interfaceRegistry) + + keys := sdk.NewKVStoreKeys( + authtypes.StoreKey, authzTypes.ModuleName, banktypes.StoreKey, stakingtypes.StoreKey, + minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, govtypes.StoreKey, + paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + evidencetypes.StoreKey, ibcFeeTypes.StoreKey, ibcTransferTypes.StoreKey, icaHostTypes.StoreKey, + capabilitytypes.StoreKey, groupTypes.StoreKey, + + bundlesTypes.StoreKey, + delegationTypes.StoreKey, + globalTypes.StoreKey, + poolTypes.StoreKey, + queryTypes.StoreKey, + stakersTypes.StoreKey, + teamTypes.StoreKey, + ) + tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) + memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, bundlesTypes.MemStoreKey, delegationTypes.MemStoreKey) + + app := &App{ + BaseApp: bApp, + cdc: cdc, + appCodec: appCodec, + interfaceRegistry: interfaceRegistry, + invCheckPeriod: invCheckPeriod, + keys: keys, + tkeys: tkeys, + memKeys: memKeys, + } + + app.ParamsKeeper = initParamsKeeper( + appCodec, + cdc, + keys[paramstypes.StoreKey], + tkeys[paramstypes.TStoreKey], + ) + + // set the BaseApp's parameter store + bApp.SetParamStore(app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable())) + + // add capability keeper and ScopeToModule for ibc module + app.CapabilityKeeper = capabilitykeeper.NewKeeper( + appCodec, + keys[capabilitytypes.StoreKey], + memKeys[capabilitytypes.MemStoreKey], + ) + + // grant capabilities for the ibc and ibc-transfer modules + scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) + scopedIBCTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibcTransferTypes.ModuleName) + scopedICAHostKeeper := app.CapabilityKeeper.ScopeToModule(icaHostTypes.SubModuleName) + // this line is used by starport scaffolding # stargate/app/scopedKeeper + + // add keepers + app.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, + keys[authtypes.StoreKey], + app.GetSubspace(authtypes.ModuleName), + authtypes.ProtoBaseAccount, + maccPerms, + sdk.Bech32PrefixAccAddr, + ) + + app.AuthzKeeper = authzkeeper.NewKeeper( + keys[authzTypes.ModuleName], + appCodec, + app.MsgServiceRouter(), + app.AccountKeeper, + ) + + app.BankKeeper = bankkeeper.NewBaseKeeper( + appCodec, + keys[banktypes.StoreKey], + app.AccountKeeper, + app.GetSubspace(banktypes.ModuleName), + app.BlockedModuleAccountAddrs(), + ) + + stakingKeeper := stakingkeeper.NewKeeper( + appCodec, + keys[stakingtypes.StoreKey], + app.AccountKeeper, + app.BankKeeper, + app.GetSubspace(stakingtypes.ModuleName), + ) + + app.MintKeeper = mintkeeper.NewKeeper( + appCodec, + keys[minttypes.StoreKey], + app.GetSubspace(minttypes.ModuleName), + &stakingKeeper, + &app.StakersKeeper, + app.AccountKeeper, + app.BankKeeper, + authtypes.FeeCollectorName, + ) + + app.DistributionKeeper = distrkeeper.NewKeeper( + appCodec, + keys[distrtypes.StoreKey], + app.GetSubspace(distrtypes.ModuleName), + app.AccountKeeper, + app.BankKeeper, + &stakingKeeper, + authtypes.FeeCollectorName, + ) + + app.SlashingKeeper = slashingkeeper.NewKeeper( + appCodec, + keys[slashingtypes.StoreKey], + &stakingKeeper, + app.GetSubspace(slashingtypes.ModuleName), + ) + + app.CrisisKeeper = crisiskeeper.NewKeeper( + app.GetSubspace(crisistypes.ModuleName), + invCheckPeriod, + app.BankKeeper, + authtypes.FeeCollectorName, + ) + + groupConfig := 
groupTypes.DefaultConfig() + /* + Example of setting group params: + groupConfig.MaxMetadataLen = 1000 + */ + app.GroupKeeper = groupkeeper.NewKeeper( + keys[groupTypes.StoreKey], + appCodec, + app.MsgServiceRouter(), + app.AccountKeeper, + groupConfig, + ) + + app.FeeGrantKeeper = feegrantkeeper.NewKeeper( + appCodec, + keys[feegrant.StoreKey], + app.AccountKeeper, + ) + + app.UpgradeKeeper = upgradekeeper.NewKeeper( + skipUpgradeHeights, + keys[upgradetypes.StoreKey], + appCodec, + homePath, + app.BaseApp, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + // register the staking hooks + // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks + app.StakingKeeper = *stakingKeeper.SetHooks( + stakingtypes.NewMultiStakingHooks(app.DistributionKeeper.Hooks(), app.SlashingKeeper.Hooks()), + ) + + // ... other modules keepers + app.GlobalKeeper = *globalKeeper.NewKeeper(appCodec, keys[globalTypes.StoreKey], authtypes.NewModuleAddress(govtypes.ModuleName).String()) + + app.TeamKeeper = *teamKeeper.NewKeeper(appCodec, keys[teamTypes.StoreKey], app.AccountKeeper, app.BankKeeper) + + app.PoolKeeper = *poolKeeper.NewKeeper( + appCodec, + keys[poolTypes.StoreKey], + memKeys[poolTypes.MemStoreKey], + + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + + app.AccountKeeper, + app.BankKeeper, + app.DistributionKeeper, + app.UpgradeKeeper, + ) + + app.StakersKeeper = *stakersKeeper.NewKeeper( + appCodec, + keys[stakersTypes.StoreKey], + memKeys[stakersTypes.MemStoreKey], + + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + + app.AccountKeeper, + app.BankKeeper, + app.DistributionKeeper, + app.PoolKeeper, + app.UpgradeKeeper, + ) + + app.DelegationKeeper = *delegationKeeper.NewKeeper( + appCodec, + keys[delegationTypes.StoreKey], + memKeys[delegationTypes.MemStoreKey], + + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + + app.AccountKeeper, + app.BankKeeper, + app.DistributionKeeper, + app.PoolKeeper, + app.UpgradeKeeper, + app.StakersKeeper, + ) + + stakersKeeper.SetDelegationKeeper(&app.StakersKeeper, app.DelegationKeeper) + poolKeeper.SetStakersKeeper(&app.PoolKeeper, app.StakersKeeper) + + app.BundlesKeeper = *bundlesKeeper.NewKeeper( + appCodec, + keys[bundlesTypes.StoreKey], + memKeys[bundlesTypes.MemStoreKey], + + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + + app.AccountKeeper, + app.BankKeeper, + app.DistributionKeeper, + app.PoolKeeper, + app.StakersKeeper, + app.DelegationKeeper, + app.UpgradeKeeper, + ) + + // Create IBC Keepers + app.IBCKeeper = ibckeeper.NewKeeper( + appCodec, keys[ibchost.StoreKey], + app.GetSubspace(ibchost.ModuleName), + app.StakingKeeper, + app.UpgradeKeeper, + scopedIBCKeeper, + ) + + app.IBCFeeKeeper = ibcFeeKeeper.NewKeeper( + appCodec, keys[ibcFeeTypes.StoreKey], + app.GetSubspace(ibcFeeTypes.ModuleName), + app.IBCKeeper.ChannelKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.AccountKeeper, + app.BankKeeper, + ) + + app.IBCTransferKeeper = ibctransferkeeper.NewKeeper( + appCodec, keys[ibcTransferTypes.StoreKey], + app.GetSubspace(ibcTransferTypes.ModuleName), + app.IBCKeeper.ChannelKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.AccountKeeper, + app.BankKeeper, + scopedIBCTransferKeeper, + ) + + app.ICAHostKeeper = icahostkeeper.NewKeeper( + appCodec, keys[icaHostTypes.StoreKey], + app.GetSubspace(icaHostTypes.SubModuleName), + app.IBCKeeper.ChannelKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + 
app.AccountKeeper, + scopedICAHostKeeper, + app.MsgServiceRouter(), + ) + + // Create evidence Keeper for to register the IBC light client misbehaviour evidence route + evidenceKeeper := evidencekeeper.NewKeeper( + appCodec, + keys[evidencetypes.StoreKey], + &app.StakingKeeper, + app.SlashingKeeper, + ) + // If evidence needs to be handled for the app, set routes in router here and seal + app.EvidenceKeeper = *evidenceKeeper + + govRouter := govv1beta1.NewRouter() + govRouter. + AddRoute(govtypes.RouterKey, govv1beta1.ProposalHandler). + AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)). + AddRoute(distrtypes.RouterKey, distribution.NewCommunityPoolSpendProposalHandler(app.DistributionKeeper)). + AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)). + AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper)) + govConfig := govtypes.DefaultConfig() + app.GovKeeper = govkeeper.NewKeeper( + appCodec, + keys[govtypes.StoreKey], + app.GetSubspace(govtypes.ModuleName), + app.AccountKeeper, + app.BankKeeper, + &stakingKeeper, + &app.StakersKeeper, + govRouter, + app.MsgServiceRouter(), + govConfig, + ) + + app.QueryKeeper = *queryKeeper.NewKeeper( + appCodec, + keys[queryTypes.StoreKey], + keys[queryTypes.MemStoreKey], + app.GetSubspace(queryTypes.ModuleName), + + app.AccountKeeper, + app.BankKeeper, + app.DistributionKeeper, + app.PoolKeeper, + app.StakersKeeper, + app.DelegationKeeper, + app.BundlesKeeper, + app.GlobalKeeper, + app.GovKeeper, + app.TeamKeeper, + ) + // this line is used by starport scaffolding # stargate/app/keeperDefinition + + // Create static IBC router, add transfer route, then set and seal it + var ibcTransferStack ibcPortTypes.IBCModule + ibcTransferStack = ibcTransfer.NewIBCModule(app.IBCTransferKeeper) + ibcTransferStack = ibcFee.NewIBCMiddleware(ibcTransferStack, app.IBCFeeKeeper) + + var icaHostStack ibcPortTypes.IBCModule + icaHostStack = icaHost.NewIBCModule(app.ICAHostKeeper) + icaHostStack = ibcFee.NewIBCMiddleware(icaHostStack, app.IBCFeeKeeper) + + ibcRouter := ibcPortTypes.NewRouter() + ibcRouter.AddRoute(ibcTransferTypes.ModuleName, ibcTransferStack). + AddRoute(icaHostTypes.SubModuleName, icaHostStack) + app.IBCKeeper.SetRouter(ibcRouter) + + /**** Module Options ****/ + + // NOTE: we may consider parsing `appOpts` inside module constructors. For the moment + // we prefer to be more strict in what arguments the modules expect. + skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)) + + // NOTE: Any module instantiated in the module manager that is later modified + // must be passed by reference here. 
+ + app.mm = module.NewManager( + // Cosmos SDK + auth.NewAppModule(appCodec, app.AccountKeeper, nil), + authz.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper), + capability.NewAppModule(appCodec, *app.CapabilityKeeper), + crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants), + distribution.NewAppModule(appCodec, app.DistributionKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + evidence.NewAppModule(app.EvidenceKeeper), + feeGrant.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), + genutil.NewAppModule( + app.AccountKeeper, app.StakingKeeper, app.BaseApp.DeliverTx, + encodingConfig.TxConfig, + ), + gov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), + group.NewAppModule(appCodec, app.GroupKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + mint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper, minttypes.DefaultInflationCalculationFn), + params.NewAppModule(app.ParamsKeeper), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + staking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), + upgrade.NewAppModule(app.UpgradeKeeper), + vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), + + // IBC + ibc.NewAppModule(app.IBCKeeper), + ibcFee.NewAppModule(app.IBCFeeKeeper), + ibcTransfer.NewAppModule(app.IBCTransferKeeper), + ica.NewAppModule(nil, &app.ICAHostKeeper), + + // KYVE + bundles.NewAppModule(appCodec, app.BundlesKeeper, app.AccountKeeper, app.BankKeeper), + delegation.NewAppModule(appCodec, app.DelegationKeeper, app.AccountKeeper, app.BankKeeper), + global.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.GlobalKeeper, app.UpgradeKeeper), + pool.NewAppModule(appCodec, app.PoolKeeper, app.AccountKeeper, app.BankKeeper), + query.NewAppModule(appCodec, app.QueryKeeper, app.AccountKeeper, app.BankKeeper), + stakers.NewAppModule(appCodec, app.StakersKeeper, app.AccountKeeper, app.BankKeeper), + team.NewAppModule(appCodec, app.BankKeeper, app.MintKeeper, app.TeamKeeper, app.UpgradeKeeper), + ) + + // During begin block slashing happens after distr.BeginBlocker so that + // there is nothing left over in the validator fee pool, so as to keep the + // CanWithdrawInvariant invariant. + // NOTE: staking module is required if HistoricalEntries param > 0 + app.mm.SetOrderBeginBlockers( + // upgrades should be run first + upgradetypes.ModuleName, + capabilitytypes.ModuleName, + minttypes.ModuleName, + // NOTE: x/team must be run before x/distribution and after x/mint. 
+ teamTypes.ModuleName, + distrtypes.ModuleName, + slashingtypes.ModuleName, + evidencetypes.ModuleName, + stakingtypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + govtypes.ModuleName, + crisistypes.ModuleName, + ibcFeeTypes.ModuleName, + ibcTransferTypes.ModuleName, + ibchost.ModuleName, + icatypes.ModuleName, + genutiltypes.ModuleName, + authzTypes.ModuleName, + feegrant.ModuleName, + groupTypes.ModuleName, + paramstypes.ModuleName, + vestingtypes.ModuleName, + + // this line is used by starport scaffolding # stargate/app/beginBlockers + poolTypes.ModuleName, + stakersTypes.ModuleName, + delegationTypes.ModuleName, + bundlesTypes.ModuleName, + queryTypes.ModuleName, + globalTypes.ModuleName, + ) + + app.mm.SetOrderEndBlockers( + crisistypes.ModuleName, + govtypes.ModuleName, + stakingtypes.ModuleName, + ibcFeeTypes.ModuleName, + ibcTransferTypes.ModuleName, + ibchost.ModuleName, + icatypes.ModuleName, + capabilitytypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + distrtypes.ModuleName, + slashingtypes.ModuleName, + minttypes.ModuleName, + genutiltypes.ModuleName, + evidencetypes.ModuleName, + authzTypes.ModuleName, + feegrant.ModuleName, + groupTypes.ModuleName, + paramstypes.ModuleName, + upgradetypes.ModuleName, + vestingtypes.ModuleName, + + // this line is used by starport scaffolding # stargate/app/endBlockers + poolTypes.ModuleName, + stakersTypes.ModuleName, + delegationTypes.ModuleName, + bundlesTypes.ModuleName, + queryTypes.ModuleName, + globalTypes.ModuleName, + teamTypes.ModuleName, + ) + + // NOTE: The genutils module must occur after staking so that pools are + // properly initialized with tokens from genesis accounts. + // NOTE: Capability module must occur first so that it can initialize any capabilities + // so that other modules that want to create or claim capabilities afterwards in InitChain + // can do so safely. + app.mm.SetOrderInitGenesis( + capabilitytypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + distrtypes.ModuleName, + stakingtypes.ModuleName, + slashingtypes.ModuleName, + govtypes.ModuleName, + minttypes.ModuleName, + crisistypes.ModuleName, + genutiltypes.ModuleName, + ibcFeeTypes.ModuleName, + ibcTransferTypes.ModuleName, + ibchost.ModuleName, + icatypes.ModuleName, + evidencetypes.ModuleName, + authzTypes.ModuleName, + feegrant.ModuleName, + groupTypes.ModuleName, + paramstypes.ModuleName, + upgradetypes.ModuleName, + vestingtypes.ModuleName, + + // this line is used by starport scaffolding # stargate/app/initGenesis + poolTypes.ModuleName, + stakersTypes.ModuleName, + delegationTypes.ModuleName, + bundlesTypes.ModuleName, + queryTypes.ModuleName, + globalTypes.ModuleName, + teamTypes.ModuleName, + ) + + // Uncomment if you want to set a custom migration order here. 
+ // app.mm.SetOrderMigrations(custom order) + + app.mm.RegisterInvariants(&app.CrisisKeeper) + app.mm.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) + + app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.mm.RegisterServices(app.configurator) + + // initialize stores + app.MountKVStores(keys) + app.MountTransientStores(tkeys) + app.MountMemoryStores(memKeys) + + // initialize BaseApp + var err error + upgradeInfo, err = app.UpgradeKeeper.ReadUpgradeInfoFromDisk() + if err != nil { + panic(err) + } + + anteHandler, err := NewAnteHandler( + app.AccountKeeper, + app.BankKeeper, + app.FeeGrantKeeper, + app.GlobalKeeper, + app.GovKeeper, + app.IBCKeeper, + app.StakingKeeper, + ante.DefaultSigVerificationGasConsumer, + encodingConfig.TxConfig.SignModeHandler(), + ) + if err != nil { + panic(fmt.Errorf("failed to create AnteHandler: %s", err)) + } + + postHandler, err := NewPostHandler( + app.BankKeeper, + app.FeeGrantKeeper, + app.GlobalKeeper, + ) + if err != nil { + panic(fmt.Errorf("failed to create PostHandler: %s", err)) + } + + app.SetAnteHandler(anteHandler) + app.SetPostHandler(postHandler) + app.SetInitChainer(app.InitChainer) + app.SetBeginBlocker(app.BeginBlocker) + app.SetEndBlocker(app.EndBlocker) + + app.UpgradeKeeper.SetUpgradeHandler( + v070.UpgradeName, + v070.CreateUpgradeHandler( + app.mm, + app.configurator, + app.PoolKeeper, + app.StakersKeeper, + keys[govtypes.StoreKey], + app.GlobalKeeper, + ), + ) + + if upgradeInfo.Name == v070.UpgradeName && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { + app.SetStoreLoader(v070.CreateStoreLoader(upgradeInfo.Height)) + } + + if loadLatest { + if err := app.LoadLatestVersion(); err != nil { + tmos.Exit(err.Error()) + } + } + + app.ScopedIBCKeeper = scopedIBCKeeper + app.ScopedIBCTransferKeeper = scopedIBCTransferKeeper + app.ScopedICAHostKeeper = scopedICAHostKeeper + + return app +} + +// Name returns the name of the App +func (app *App) Name() string { return app.BaseApp.Name() } + +// GetBaseApp returns the base app of the application +func (app App) GetBaseApp() *baseapp.BaseApp { return app.BaseApp } + +// BeginBlocker application updates every begin block +func (app *App) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { + return app.mm.BeginBlock(ctx, req) +} + +// EndBlocker application updates every end block +func (app *App) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + return app.mm.EndBlock(ctx, req) +} + +// InitChainer application update at chain initialization +func (app *App) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + var genesisState GenesisState + if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { + panic(err) + } + app.UpgradeKeeper.SetModuleVersionMap(ctx, app.mm.GetVersionMap()) + return app.mm.InitGenesis(ctx, app.appCodec, genesisState) +} + +// LoadHeight loads a particular height +func (app *App) LoadHeight(height int64) error { + return app.LoadVersion(height) +} + +// LegacyAmino returns SimApp's amino codec. +// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *App) LegacyAmino() *codec.LegacyAmino { + return app.cdc +} + +// AppCodec returns an app codec. 
+// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *App) AppCodec() codec.Codec { + return app.appCodec +} + +// InterfaceRegistry returns an InterfaceRegistry +func (app *App) InterfaceRegistry() types.InterfaceRegistry { + return app.interfaceRegistry +} + +// GetKey returns the KVStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (app *App) GetKey(storeKey string) *storetypes.KVStoreKey { + return app.keys[storeKey] +} + +// GetTKey returns the TransientStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (app *App) GetTKey(storeKey string) *storetypes.TransientStoreKey { + return app.tkeys[storeKey] +} + +// GetMemKey returns the MemStoreKey for the provided mem key. +// +// NOTE: This is solely used for testing purposes. +func (app *App) GetMemKey(storeKey string) *storetypes.MemoryStoreKey { + return app.memKeys[storeKey] +} + +// GetSubspace returns a param subspace for a given module name. +// +// NOTE: This is solely to be used for testing purposes. +func (app *App) GetSubspace(moduleName string) paramstypes.Subspace { + subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) + return subspace +} + +// RegisterAPIRoutes registers all application module routes with the provided +// API server. +func (app *App) RegisterAPIRoutes(apiSvr *api.Server, _ config.APIConfig) { + clientCtx := apiSvr.ClientCtx + // Register new tx routes from grpc-gateway. + authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + // Register new tendermint queries routes from grpc-gateway. + tmservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + + // Register grpc-gateway routes for all modules. + ModuleBasics.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + + // register swagger API + apiSvr.Router.Handle("/swagger.yml", http.FileServer(http.FS(kyveDocs.Swagger))) + apiSvr.Router.HandleFunc("/", kyveDocs.Handler(Name, "/swagger.yml")) +} + +// RegisterTxService implements the Application.RegisterTxService method. +func (app *App) RegisterTxService(clientCtx client.Context) { + authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry) +} + +// RegisterTendermintService implements the Application.RegisterTendermintService method. +func (app *App) RegisterTendermintService(clientCtx client.Context) { + tmservice.RegisterTendermintService( + clientCtx, + app.BaseApp.GRPCQueryRouter(), + app.interfaceRegistry, + app.Query, + ) +} + +// SimulationManager implements the SimulationApp interface. +// NOTE: We simply return nil as we don't use the simulation manager anywhere. +func (app *App) SimulationManager() *module.SimulationManager { return nil } diff --git a/app/encoding.go b/app/encoding.go new file mode 100644 index 00000000..4c42fa0f --- /dev/null +++ b/app/encoding.go @@ -0,0 +1,50 @@ +// https://github.com/ignite/cli/blob/v0.25.1/ignite/pkg/cosmoscmd/encoding.go + +package app + +import ( + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/std" + "github.com/cosmos/cosmos-sdk/x/auth/tx" +) + +// EncodingConfig specifies the concrete encoding types to use for a given app. +// This is provided for compatibility between protobuf and amino implementations. 
+type EncodingConfig struct { + InterfaceRegistry codecTypes.InterfaceRegistry + Marshaler codec.Codec + TxConfig client.TxConfig + Amino *codec.LegacyAmino +} + +// NewEncodingConfig creates an EncodingConfig instance. +func NewEncodingConfig() EncodingConfig { + amino := codec.NewLegacyAmino() + interfaceRegistry := codecTypes.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + txCfg := tx.NewTxConfig(marshaler, tx.DefaultSignModes) + + encodingConfig := EncodingConfig{ + InterfaceRegistry: interfaceRegistry, + Marshaler: marshaler, + TxConfig: txCfg, + Amino: amino, + } + + return encodingConfig +} + +// MakeEncodingConfig creates an EncodingConfig instance. +// It registers types with both the codec and interface registry. +func MakeEncodingConfig() EncodingConfig { + encodingConfig := NewEncodingConfig() + + std.RegisterLegacyAminoCodec(encodingConfig.Amino) + std.RegisterInterfaces(encodingConfig.InterfaceRegistry) + ModuleBasics.RegisterLegacyAminoCodec(encodingConfig.Amino) + ModuleBasics.RegisterInterfaces(encodingConfig.InterfaceRegistry) + + return encodingConfig +} diff --git a/app/export.go b/app/export.go new file mode 100644 index 00000000..da4565e6 --- /dev/null +++ b/app/export.go @@ -0,0 +1,194 @@ +package app + +import ( + "encoding/json" + "log" + + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + servertypes "github.com/cosmos/cosmos-sdk/server/types" + sdk "github.com/cosmos/cosmos-sdk/types" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/staking" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// ExportAppStateAndValidators exports the state of the application for a genesis +// file. +func (app *App) ExportAppStateAndValidators( + forZeroHeight bool, jailAllowedAddrs []string, +) (servertypes.ExportedApp, error) { + // as if they could withdraw from the start of the next block + ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}) + + // We export at last height + 1, because that's the height at which + // Tendermint will start InitChain. + height := app.LastBlockHeight() + 1 + if forZeroHeight { + height = 0 + app.prepForZeroHeightGenesis(ctx, jailAllowedAddrs) + } + + genState := app.mm.ExportGenesis(ctx, app.appCodec) + appState, err := json.MarshalIndent(genState, "", " ") + if err != nil { + return servertypes.ExportedApp{}, err + } + + validators, err := staking.WriteValidators(ctx, app.StakingKeeper) + if err != nil { + return servertypes.ExportedApp{}, err + } + return servertypes.ExportedApp{ + AppState: appState, + Validators: validators, + Height: height, + ConsensusParams: app.BaseApp.GetConsensusParams(ctx), + }, nil +} + +// prepare for fresh start at zero height +// NOTE zero height genesis is a temporary feature which will be deprecated +// +// in favour of export at a block height +func (app *App) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { + applyAllowedAddrs := false + + // check if there is a allowed address list + if len(jailAllowedAddrs) > 0 { + applyAllowedAddrs = true + } + + allowedAddrsMap := make(map[string]bool) + + for _, addr := range jailAllowedAddrs { + _, err := sdk.ValAddressFromBech32(addr) + if err != nil { + log.Fatal(err) + } + allowedAddrsMap[addr] = true + } + + /* Just to be safe, assert the invariants on current state. */ + app.CrisisKeeper.AssertInvariants(ctx) + + /* Handle fee distribution state. 
*/ + + // withdraw all validator commission + app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + _, err := app.DistributionKeeper.WithdrawValidatorCommission(ctx, val.GetOperator()) + if err != nil { + panic(err) + } + return false + }) + + // withdraw all delegator rewards + dels := app.StakingKeeper.GetAllDelegations(ctx) + for _, delegation := range dels { + _, err := app.DistributionKeeper.WithdrawDelegationRewards(ctx, delegation.GetDelegatorAddr(), delegation.GetValidatorAddr()) + if err != nil { + panic(err) + } + } + + // clear validator slash events + app.DistributionKeeper.DeleteAllValidatorSlashEvents(ctx) + + // clear validator historical rewards + app.DistributionKeeper.DeleteAllValidatorHistoricalRewards(ctx) + + // set context height to zero + height := ctx.BlockHeight() + ctx = ctx.WithBlockHeight(0) + + // reinitialize all validators + app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + // donate any unwithdrawn outstanding reward fraction tokens to the community pool + scraps := app.DistributionKeeper.GetValidatorOutstandingRewardsCoins(ctx, val.GetOperator()) + feePool := app.DistributionKeeper.GetFeePool(ctx) + feePool.CommunityPool = feePool.CommunityPool.Add(scraps...) + app.DistributionKeeper.SetFeePool(ctx, feePool) + + err := app.DistributionKeeper.Hooks().AfterValidatorCreated(ctx, val.GetOperator()) + if err != nil { + panic(err) + } + return false + }) + + // reinitialize all delegations + for _, del := range dels { + err := app.DistributionKeeper.Hooks().BeforeDelegationCreated(ctx, del.GetDelegatorAddr(), del.GetValidatorAddr()) + if err != nil { + panic(err) + } + err = app.DistributionKeeper.Hooks().AfterDelegationModified(ctx, del.GetDelegatorAddr(), del.GetValidatorAddr()) + if err != nil { + panic(err) + } + } + + // reset context height + ctx = ctx.WithBlockHeight(height) + + /* Handle staking state. */ + + // iterate through redelegations, reset creation height + app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) { + for i := range red.Entries { + red.Entries[i].CreationHeight = 0 + } + app.StakingKeeper.SetRedelegation(ctx, red) + return false + }) + + // iterate through unbonding delegations, reset creation height + app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) { + for i := range ubd.Entries { + ubd.Entries[i].CreationHeight = 0 + } + app.StakingKeeper.SetUnbondingDelegation(ctx, ubd) + return false + }) + + // Iterate through validators by power descending, reset bond heights, and + // update bond intra-tx counters. + store := ctx.KVStore(app.keys[stakingtypes.StoreKey]) + iter := sdk.KVStoreReversePrefixIterator(store, stakingtypes.ValidatorsKey) + counter := int16(0) + + for ; iter.Valid(); iter.Next() { + addr := sdk.ValAddress(iter.Key()[1:]) + validator, found := app.StakingKeeper.GetValidator(ctx, addr) + if !found { + panic("expected validator, not found") + } + + validator.UnbondingHeight = 0 + if applyAllowedAddrs && !allowedAddrsMap[addr.String()] { + validator.Jailed = true + } + + app.StakingKeeper.SetValidator(ctx, validator) + counter++ + } + + iter.Close() + + if _, err := app.StakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx); err != nil { + panic(err) + } + + /* Handle slashing state. 
*/ + + // reset start height on signing infos + app.SlashingKeeper.IterateValidatorSigningInfos( + ctx, + func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { + info.StartHeight = 0 + app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info) + return false + }, + ) +} diff --git a/app/forks.go b/app/forks.go new file mode 100644 index 00000000..e8dfd06a --- /dev/null +++ b/app/forks.go @@ -0,0 +1,13 @@ +package app + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func BeginBlockForks(ctx sdk.Context, app *App) { + switch ctx.BlockHeight() { + default: + // do nothing + return + } +} diff --git a/app/genesis.go b/app/genesis.go new file mode 100644 index 00000000..48ce2a22 --- /dev/null +++ b/app/genesis.go @@ -0,0 +1,21 @@ +package app + +import ( + "encoding/json" + + "github.com/cosmos/cosmos-sdk/codec" +) + +// The GenesisState of the blockchain is represented here as a map of raw json +// messages key'd by a identifier string. +// The identifier is used to determine which module genesis information belongs +// to so it may be appropriately routed during init chain. +// Within this application default genesis information is retrieved from +// the ModuleBasicManager which populates json from each BasicModule +// object provided to it during init. +type GenesisState map[string]json.RawMessage + +// NewDefaultGenesisState generates the default state for the application. +func NewDefaultGenesisState(cdc codec.JSONCodec) GenesisState { + return ModuleBasics.DefaultGenesis(cdc) +} diff --git a/app/keepers.go b/app/keepers.go new file mode 100644 index 00000000..22517c48 --- /dev/null +++ b/app/keepers.go @@ -0,0 +1,130 @@ +package app + +import ( + "github.com/cosmos/cosmos-sdk/codec" + storeTypes "github.com/cosmos/cosmos-sdk/store/types" + + // Auth + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + // Authz + authzKeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + bankTypes "github.com/cosmos/cosmos-sdk/x/bank/types" + // Bundles + bundlesKeeper "github.com/KYVENetwork/chain/x/bundles/keeper" + // Capability + capabilityKeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + // Crisis + crisisKeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisisTypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + // Delegation + delegationKeeper "github.com/KYVENetwork/chain/x/delegation/keeper" + // Distribution + distributionKeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + distributionTypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + // Evidence + evidenceKeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + // FeeGrant + feeGrantKeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + // Global + globalKeeper "github.com/KYVENetwork/chain/x/global/keeper" + // Governance + govKeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + govV1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Group + groupKeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" + // IBC + ibcHost "github.com/cosmos/ibc-go/v5/modules/core/24-host" + ibcKeeper "github.com/cosmos/ibc-go/v5/modules/core/keeper" + // IBC Fee + ibcFeeKeeper "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/keeper" + // IBC Transfer + ibcTransferKeeper "github.com/cosmos/ibc-go/v5/modules/apps/transfer/keeper" + ibcTransferTypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" 
+ // ICA Host + icaHostKeeper "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/keeper" + icaHostTypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/host/types" + // Mint + mintKeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + mintTypes "github.com/cosmos/cosmos-sdk/x/mint/types" + // Parameters + paramsKeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" + // Pool + poolKeeper "github.com/KYVENetwork/chain/x/pool/keeper" + // Query + queryKeeper "github.com/KYVENetwork/chain/x/query/keeper" + // Slashing + slashingKeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingTypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + // Stakers + stakersKeeper "github.com/KYVENetwork/chain/x/stakers/keeper" + // Staking + stakingKeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingTypes "github.com/cosmos/cosmos-sdk/x/staking/types" + // Team + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + // Upgrade + upgradeKeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" +) + +type Keepers struct { + // Cosmos SDK + AccountKeeper authKeeper.AccountKeeper + AuthzKeeper authzKeeper.Keeper + BankKeeper bankKeeper.Keeper + CapabilityKeeper *capabilityKeeper.Keeper + CrisisKeeper crisisKeeper.Keeper + DistributionKeeper distributionKeeper.Keeper + EvidenceKeeper evidenceKeeper.Keeper + FeeGrantKeeper feeGrantKeeper.Keeper + GovKeeper govKeeper.Keeper + GroupKeeper groupKeeper.Keeper + MintKeeper mintKeeper.Keeper + ParamsKeeper paramsKeeper.Keeper + SlashingKeeper slashingKeeper.Keeper + StakingKeeper stakingKeeper.Keeper + UpgradeKeeper upgradeKeeper.Keeper + + // IBC + IBCKeeper *ibcKeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly + IBCFeeKeeper ibcFeeKeeper.Keeper + IBCTransferKeeper ibcTransferKeeper.Keeper + ICAHostKeeper icaHostKeeper.Keeper + + // KYVE + BundlesKeeper bundlesKeeper.Keeper + DelegationKeeper delegationKeeper.Keeper + GlobalKeeper globalKeeper.Keeper + PoolKeeper poolKeeper.Keeper + QueryKeeper queryKeeper.Keeper + StakersKeeper stakersKeeper.Keeper + TeamKeeper teamKeeper.Keeper + + // ----- Scoped Keepers ----- + // make scoped keepers public for test purposes + ScopedIBCKeeper capabilityKeeper.ScopedKeeper + ScopedIBCTransferKeeper capabilityKeeper.ScopedKeeper + ScopedICAHostKeeper capabilityKeeper.ScopedKeeper +} + +// initParamsKeeper init params keeper and its subspaces +func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storeTypes.StoreKey) paramsKeeper.Keeper { + keeper := paramsKeeper.NewKeeper(appCodec, legacyAmino, key, tkey) + + keeper.Subspace(authTypes.ModuleName) + keeper.Subspace(bankTypes.ModuleName) + keeper.Subspace(stakingTypes.ModuleName) + keeper.Subspace(mintTypes.ModuleName) + keeper.Subspace(distributionTypes.ModuleName) + keeper.Subspace(slashingTypes.ModuleName) + keeper.Subspace(govTypes.ModuleName).WithKeyTable(govV1.ParamKeyTable()) + keeper.Subspace(crisisTypes.ModuleName) + keeper.Subspace(ibcTransferTypes.ModuleName) + keeper.Subspace(ibcHost.ModuleName) + keeper.Subspace(icaHostTypes.SubModuleName) + + return keeper +} diff --git a/app/modules.go b/app/modules.go new file mode 100644 index 00000000..2d0ed6e6 --- /dev/null +++ b/app/modules.go @@ -0,0 +1,152 @@ +package app + +import ( + teamTypes "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/types/module" + + // Auth + "github.com/cosmos/cosmos-sdk/x/auth" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + 
"github.com/cosmos/cosmos-sdk/x/auth/vesting" + // Authz + authz "github.com/cosmos/cosmos-sdk/x/authz/module" + // Bank + "github.com/cosmos/cosmos-sdk/x/bank" + // Bundles + "github.com/KYVENetwork/chain/x/bundles" + bundlesTypes "github.com/KYVENetwork/chain/x/bundles/types" + // Capability + "github.com/cosmos/cosmos-sdk/x/capability" + // Crisis + "github.com/cosmos/cosmos-sdk/x/crisis" + // Delegation + "github.com/KYVENetwork/chain/x/delegation" + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + // Distribution + "github.com/cosmos/cosmos-sdk/x/distribution" + distributionTypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + // Evidence + "github.com/cosmos/cosmos-sdk/x/evidence" + // FeeGrant + feeGrant "github.com/cosmos/cosmos-sdk/x/feegrant/module" + // GenUtil + "github.com/cosmos/cosmos-sdk/x/genutil" + // Global + "github.com/KYVENetwork/chain/x/global" + // Governance + "github.com/cosmos/cosmos-sdk/x/gov" + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Group + group "github.com/cosmos/cosmos-sdk/x/group/module" + // IBC + ibc "github.com/cosmos/ibc-go/v5/modules/core" + // IBC Fee + ibcFee "github.com/cosmos/ibc-go/v5/modules/apps/29-fee" + ibcFeeTypes "github.com/cosmos/ibc-go/v5/modules/apps/29-fee/types" + // IBC Transfer + ibcTransfer "github.com/cosmos/ibc-go/v5/modules/apps/transfer" + ibcTransferTypes "github.com/cosmos/ibc-go/v5/modules/apps/transfer/types" + // ICA + ica "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts" + icaTypes "github.com/cosmos/ibc-go/v5/modules/apps/27-interchain-accounts/types" + // Mint + "github.com/cosmos/cosmos-sdk/x/mint" + mintTypes "github.com/cosmos/cosmos-sdk/x/mint/types" + // Parameters + "github.com/cosmos/cosmos-sdk/x/params" + // Pool + "github.com/KYVENetwork/chain/x/pool" + poolTypes "github.com/KYVENetwork/chain/x/pool/types" + // Query + "github.com/KYVENetwork/chain/x/query" + // Slashing + "github.com/cosmos/cosmos-sdk/x/slashing" + // Stakers + "github.com/KYVENetwork/chain/x/stakers" + stakersTypes "github.com/KYVENetwork/chain/x/stakers/types" + // Staking + "github.com/cosmos/cosmos-sdk/x/staking" + stakingTypes "github.com/cosmos/cosmos-sdk/x/staking/types" + // Team + "github.com/KYVENetwork/chain/x/team" + // Upgrade + "github.com/cosmos/cosmos-sdk/x/upgrade" +) + +// appModuleBasics returns ModuleBasics for the module BasicManager. +var appModuleBasics = []module.AppModuleBasic{ + // Cosmos SDK + auth.AppModuleBasic{}, + authz.AppModuleBasic{}, + bank.AppModuleBasic{}, + capability.AppModuleBasic{}, + crisis.AppModuleBasic{}, + distribution.AppModuleBasic{}, + evidence.AppModuleBasic{}, + feeGrant.AppModuleBasic{}, + genutil.AppModuleBasic{}, + gov.NewAppModuleBasic(getGovProposalHandlers()), + group.AppModuleBasic{}, + mint.AppModuleBasic{}, + params.AppModuleBasic{}, + slashing.AppModuleBasic{}, + staking.AppModuleBasic{}, + upgrade.AppModuleBasic{}, + vesting.AppModuleBasic{}, + + // IBC + ibc.AppModuleBasic{}, + ibcFee.AppModuleBasic{}, + ibcTransfer.AppModuleBasic{}, + ica.AppModuleBasic{}, + + // KYVE + bundles.AppModuleBasic{}, + delegation.AppModuleBasic{}, + global.AppModuleBasic{}, + pool.AppModuleBasic{}, + query.AppModuleBasic{}, + stakers.AppModuleBasic{}, + team.AppModuleBasic{}, +} + +// moduleAccountPermissions ... 
+var moduleAccountPermissions = map[string][]string{ + // Cosmos SDK + authTypes.FeeCollectorName: {authTypes.Burner}, + distributionTypes.ModuleName: nil, + govTypes.ModuleName: {authTypes.Burner}, + mintTypes.ModuleName: {authTypes.Minter}, + stakingTypes.BondedPoolName: {authTypes.Burner, authTypes.Staking}, + stakingTypes.NotBondedPoolName: {authTypes.Burner, authTypes.Staking}, + + // IBC + ibcTransferTypes.ModuleName: {authTypes.Minter, authTypes.Burner}, + ibcFeeTypes.ModuleName: nil, + icaTypes.ModuleName: nil, + + // KYVE + bundlesTypes.ModuleName: nil, + delegationTypes.ModuleName: nil, + poolTypes.ModuleName: nil, + stakersTypes.ModuleName: nil, + teamTypes.ModuleName: nil, +} + +// BlockedModuleAccountAddrs returns all the app's blocked module account addresses. +func (app *App) BlockedModuleAccountAddrs() map[string]bool { + modAccAddrs := app.ModuleAccountAddrs() + delete(modAccAddrs, authTypes.NewModuleAddress(govTypes.ModuleName).String()) + + return modAccAddrs +} + +// ModuleAccountAddrs returns all the app's module account addresses. +func (app *App) ModuleAccountAddrs() map[string]bool { + modAccAddrs := make(map[string]bool) + for acc := range maccPerms { + modAccAddrs[authTypes.NewModuleAddress(acc).String()] = true + } + + return modAccAddrs +} diff --git a/app/test_helpers.go b/app/test_helpers.go new file mode 100644 index 00000000..e5e9a097 --- /dev/null +++ b/app/test_helpers.go @@ -0,0 +1,163 @@ +package app + +import ( + "encoding/json" + "time" + + globalTypes "github.com/KYVENetwork/chain/x/global/types" + + teamTypes "github.com/KYVENetwork/chain/x/team/types" + + "cosmossdk.io/math" + + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + "github.com/cosmos/cosmos-sdk/simapp" + "github.com/cosmos/cosmos-sdk/testutil/mock" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + dbm "github.com/tendermint/tm-db" + + tmtypes "github.com/tendermint/tendermint/types" +) + +// DefaultConsensusParams ... +var DefaultConsensusParams = &abci.ConsensusParams{ + Block: &abci.BlockParams{ + MaxBytes: 200000, + MaxGas: -1, // no limit + }, + Evidence: &tmproto.EvidenceParams{ + MaxAgeNumBlocks: 302400, + MaxAgeDuration: 504 * time.Hour, // 3 weeks is the max duration + MaxBytes: 10000, + }, + Validator: &tmproto.ValidatorParams{ + PubKeyTypes: []string{ + tmtypes.ABCIPubKeyTypeEd25519, + }, + }, +} + +func DefaultGenesisWithValSet(codec codec.Codec) map[string]json.RawMessage { + bondingDenom := globalTypes.Denom + + // Generate a new validator. 
+ key, _ := mock.NewPV().GetPubKey() + validator := tmtypes.NewValidator(key, 1) + + publicKey, _ := cryptocodec.FromTmPubKeyInterface(validator.PubKey) + publicKeyAny, _ := codectypes.NewAnyWithValue(publicKey) + + validators := []stakingtypes.Validator{ + { + OperatorAddress: sdk.ValAddress(validator.Address).String(), + ConsensusPubkey: publicKeyAny, + Jailed: false, + Status: stakingtypes.Bonded, + Tokens: sdk.DefaultPowerReduction, + DelegatorShares: sdk.OneDec(), + Description: stakingtypes.Description{}, + UnbondingHeight: 0, + UnbondingTime: time.Unix(0, 0).UTC(), + Commission: stakingtypes.NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), + MinSelfDelegation: math.ZeroInt(), + }, + } + // Generate a new delegator. + delegatorKey := secp256k1.GenPrivKey() + delegator := authtypes.NewBaseAccount( + delegatorKey.PubKey().Address().Bytes(), delegatorKey.PubKey(), 0, 0, + ) + + delegations := []stakingtypes.Delegation{ + stakingtypes.NewDelegation(delegator.GetAddress(), validator.Address.Bytes(), sdk.OneDec()), + } + + // Default genesis state. + config := MakeEncodingConfig() + genesisState := ModuleBasics.DefaultGenesis(config.Marshaler) + + // Update x/auth state. + authGenesis := authtypes.NewGenesisState(authtypes.DefaultParams(), []authtypes.GenesisAccount{delegator}) + genesisState[authtypes.ModuleName] = codec.MustMarshalJSON(authGenesis) + + // Update x/bank state. + bondedCoins := sdk.NewCoins(sdk.NewCoin(bondingDenom, sdk.DefaultPowerReduction)) + + teamCoins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(teamTypes.TEAM_ALLOCATION))) + + bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, []banktypes.Balance{ + { + Address: authtypes.NewModuleAddress(stakingtypes.BondedPoolName).String(), + Coins: bondedCoins, + }, + { + Address: authtypes.NewModuleAddress(teamTypes.ModuleName).String(), + Coins: teamCoins, + }, + }, bondedCoins.Add(sdk.NewInt64Coin(globalTypes.Denom, int64(teamTypes.TEAM_ALLOCATION))), []banktypes.Metadata{}) + genesisState[banktypes.ModuleName] = codec.MustMarshalJSON(bankGenesis) + + // Update x/staking state. + stakingParams := stakingtypes.DefaultParams() + stakingParams.BondDenom = bondingDenom + + stakingGenesis := stakingtypes.NewGenesisState(stakingParams, validators, delegations) + genesisState[stakingtypes.ModuleName] = codec.MustMarshalJSON(stakingGenesis) + + // Return. + return genesisState +} + +// Setup initializes a new App. 
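+// It creates an in-memory database, registers the "kyve" Bech32 prefixes and
+// initializes the chain with the genesis state built by DefaultGenesisWithValSet.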
+func Setup() *App { + db := dbm.NewMemDB() + + config := MakeEncodingConfig() + + setPrefixes("kyve") + + app := NewKYVEApp(log.NewNopLogger(), db, nil, true, map[int64]bool{}, DefaultNodeHome, 5, config, simapp.EmptyAppOptions{}) + // init chain must be called to stop deliverState from being nil + + genesisState := DefaultGenesisWithValSet(app.AppCodec()) + stateBytes, err := json.MarshalIndent(genesisState, "", " ") + if err != nil { + panic(err) + } + + // Initialize the chain + app.InitChain( + abci.RequestInitChain{ + ChainId: "kyve-test", + Validators: []abci.ValidatorUpdate{}, + ConsensusParams: DefaultConsensusParams, + AppStateBytes: stateBytes, + }, + ) + + return app +} + +func setPrefixes(accountAddressPrefix string) { + // Set prefixes + accountPubKeyPrefix := accountAddressPrefix + "pub" + validatorAddressPrefix := accountAddressPrefix + "valoper" + validatorPubKeyPrefix := accountAddressPrefix + "valoperpub" + consNodeAddressPrefix := accountAddressPrefix + "valcons" + consNodePubKeyPrefix := accountAddressPrefix + "valconspub" + + // Set and seal config + config := sdk.GetConfig() + config.SetBech32PrefixForAccount(accountAddressPrefix, accountPubKeyPrefix) + config.SetBech32PrefixForValidator(validatorAddressPrefix, validatorPubKeyPrefix) + config.SetBech32PrefixForConsensusNode(consNodeAddressPrefix, consNodePubKeyPrefix) +} diff --git a/app/upgrades/v0_8_0/constants.go b/app/upgrades/v0_8_0/constants.go new file mode 100644 index 00000000..c2e81916 --- /dev/null +++ b/app/upgrades/v0_8_0/constants.go @@ -0,0 +1,4 @@ +package v080 + +// UpgradeName defines the on-chain upgrade name for the KYVE v0.8.0 upgrade. +const UpgradeName = "v0.8.0" diff --git a/app/upgrades/v0_8_0/store.go b/app/upgrades/v0_8_0/store.go new file mode 100644 index 00000000..ad5100c3 --- /dev/null +++ b/app/upgrades/v0_8_0/store.go @@ -0,0 +1,26 @@ +package v080 + +import ( + globalTypes "github.com/KYVENetwork/chain/x/global/types" + teamTypes "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/baseapp" + storeTypes "github.com/cosmos/cosmos-sdk/store/types" + // Upgrade + upgradeTypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +func CreateStoreLoader(upgradeHeight int64) baseapp.StoreLoader { + storeUpgrades := storeTypes.StoreUpgrades{ + Added: []string{ + // kyve + globalTypes.StoreKey, + teamTypes.StoreKey, + }, + Deleted: []string{ + "registry", + "fees", + }, + } + + return upgradeTypes.UpgradeStoreLoader(upgradeHeight, &storeUpgrades) +} diff --git a/app/upgrades/v0_8_0/upgrade.go b/app/upgrades/v0_8_0/upgrade.go new file mode 100644 index 00000000..bc130c86 --- /dev/null +++ b/app/upgrades/v0_8_0/upgrade.go @@ -0,0 +1,75 @@ +package v080 + +import ( + "fmt" + + types2 "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/x/gov/types" + + poolKeeper "github.com/KYVENetwork/chain/x/pool/keeper" + stakersKeeper "github.com/KYVENetwork/chain/x/stakers/keeper" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + // Global + globalKeeper "github.com/KYVENetwork/chain/x/global/keeper" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + // Upgrade + upgradeTypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +func CreateUpgradeHandler( + mm *module.Manager, + configurator module.Configurator, + pk poolKeeper.Keeper, + sk stakersKeeper.Keeper, + govStoreKey *types2.KVStoreKey, + globalKeeper globalKeeper.Keeper, +) upgradeTypes.UpgradeHandler { + return func(ctx sdk.Context, _ 
upgradeTypes.Plan, vm module.VersionMap) (module.VersionMap, error) { + cosmosVersionmap, err := mm.RunMigrations(ctx, configurator, vm) + // kyve params + ctx.Logger().Info("Init global module.") + InitGlobalParams(ctx, globalKeeper) + + ctx.Logger().Info("Remove all stakers from disabled pool.") + RemoveStakersFromDisabledPools(ctx, pk, sk) + + ParseProposals(ctx, govStoreKey) + + return cosmosVersionmap, err + } +} + +func RemoveStakersFromDisabledPools(ctx sdk.Context, pk poolKeeper.Keeper, sk stakersKeeper.Keeper) { + for _, pool := range pk.GetAllPools(ctx) { + if pool.Disabled { + for _, staker := range sk.GetAllStakerAddressesOfPool(ctx, pool.Id) { + sk.LeavePool(ctx, staker, pool.Id) + ctx.Logger().Info(fmt.Sprintf("Remove %s from pool %s.\n", staker, pool.Name)) + } + } + } +} + +// InitGlobalParams ... +func InitGlobalParams(ctx sdk.Context, globalKeeper globalKeeper.Keeper) { + params := globalTypes.DefaultParams() + + minInitialRatio, _ := sdk.NewDecFromStr("0.25") + params.MinInitialDepositRatio = minInitialRatio + + params.MinGasPrice = sdk.NewDec(1) + + burnRatio, _ := sdk.NewDecFromStr("0.2") + params.BurnRatio = burnRatio + + globalKeeper.SetParams(ctx, params) +} + +func ParseProposals(ctx sdk.Context, storeKey *types2.KVStoreKey) { + store := ctx.KVStore(storeKey) + for i := 1; i <= 168; i++ { + store.Delete(types.ProposalKey(uint64(i))) + } + store.Delete(types.ProposalKey(uint64(172))) +} diff --git a/buf.work.yaml b/buf.work.yaml new file mode 100644 index 00000000..1878b341 --- /dev/null +++ b/buf.work.yaml @@ -0,0 +1,3 @@ +version: v1 +directories: + - proto diff --git a/cmd/kyved/app_creator.go b/cmd/kyved/app_creator.go new file mode 100644 index 00000000..e8820c0c --- /dev/null +++ b/cmd/kyved/app_creator.go @@ -0,0 +1,117 @@ +package main + +import ( + "errors" + "io" + "path/filepath" + + kyveApp "github.com/KYVENetwork/chain/app" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/server" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/snapshots" + snapshotsTypes "github.com/cosmos/cosmos-sdk/snapshots/types" + "github.com/cosmos/cosmos-sdk/store" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cast" + "github.com/tendermint/tendermint/libs/log" + dbm "github.com/tendermint/tm-db" +) + +// appCreator is a wrapper for EncodingConfig. +// This allows us to reuse encodingConfig received by NewRootCmd in both createApp and exportApp. 
+type appCreator struct{ encodingConfig kyveApp.EncodingConfig }
+
+func (ac appCreator) createApp(
+	logger log.Logger,
+	db dbm.DB,
+	traceStore io.Writer,
+	appOpts serverTypes.AppOptions,
+) serverTypes.Application {
+	var cache sdk.MultiStorePersistentCache
+
+	if cast.ToBool(appOpts.Get(server.FlagInterBlockCache)) {
+		cache = store.NewCommitKVStoreCacheManager()
+	}
+
+	skipUpgradeHeights := make(map[int64]bool)
+	for _, h := range cast.ToIntSlice(appOpts.Get(server.FlagUnsafeSkipUpgrades)) {
+		skipUpgradeHeights[int64(h)] = true
+	}
+
+	pruningOpts, err := server.GetPruningOptionsFromFlags(appOpts)
+	if err != nil {
+		panic(err)
+	}
+
+	snapshotDir := filepath.Join(cast.ToString(appOpts.Get(flags.FlagHome)), "data", "snapshots")
+	snapshotDB, err := dbm.NewDB("metadata", dbm.GoLevelDBBackend, snapshotDir)
+	if err != nil {
+		panic(err)
+	}
+	snapshotStore, err := snapshots.NewStore(snapshotDB, snapshotDir)
+	if err != nil {
+		panic(err)
+	}
+
+	snapshotOptions := snapshotsTypes.NewSnapshotOptions(
+		cast.ToUint64(appOpts.Get(server.FlagStateSyncSnapshotInterval)),
+		cast.ToUint32(appOpts.Get(server.FlagStateSyncSnapshotKeepRecent)),
+	)
+
+	return kyveApp.NewKYVEApp(
+		logger, db, traceStore, true, skipUpgradeHeights,
+		cast.ToString(appOpts.Get(flags.FlagHome)),
+		cast.ToUint(appOpts.Get(server.FlagInvCheckPeriod)),
+		ac.encodingConfig,
+		appOpts,
+		baseapp.SetPruning(pruningOpts),
+		baseapp.SetMinGasPrices(cast.ToString(appOpts.Get(server.FlagMinGasPrices))),
+		baseapp.SetMinRetainBlocks(cast.ToUint64(appOpts.Get(server.FlagMinRetainBlocks))),
+		baseapp.SetHaltHeight(cast.ToUint64(appOpts.Get(server.FlagHaltHeight))),
+		baseapp.SetHaltTime(cast.ToUint64(appOpts.Get(server.FlagHaltTime))),
+		baseapp.SetInterBlockCache(cache),
+		baseapp.SetTrace(cast.ToBool(appOpts.Get(server.FlagTrace))),
+		baseapp.SetIndexEvents(cast.ToStringSlice(appOpts.Get(server.FlagIndexEvents))),
+		baseapp.SetSnapshot(snapshotStore, snapshotOptions),
+		baseapp.SetIAVLCacheSize(cast.ToInt(appOpts.Get(server.FlagIAVLCacheSize))),
+		baseapp.SetIAVLDisableFastNode(cast.ToBool(appOpts.Get(server.FlagDisableIAVLFastNode))),
+	)
+}
+
+func (ac appCreator) exportApp(
+	logger log.Logger,
+	db dbm.DB,
+	traceStore io.Writer,
+	height int64,
+	forZeroHeight bool,
+	jailWhiteList []string,
+	appOpts serverTypes.AppOptions,
+) (serverTypes.ExportedApp, error) {
+	homePath, ok := appOpts.Get(flags.FlagHome).(string)
+	if !ok || homePath == "" {
+		return serverTypes.ExportedApp{}, errors.New("application home not set")
+	}
+
+	app := kyveApp.NewKYVEApp(
+		logger,
+		db,
+		traceStore,
+		height == -1, // -1 means no height is provided
+		map[int64]bool{},
+		homePath,
+		uint(1),
+		ac.encodingConfig,
+		appOpts,
+	)
+
+	if height != -1 {
+		if err := app.LoadHeight(height); err != nil {
+			return serverTypes.ExportedApp{}, err
+		}
+	}
+
+	return app.ExportAppStateAndValidators(forZeroHeight, jailWhiteList)
+}
diff --git a/cmd/kyved/config.go b/cmd/kyved/config.go
new file mode 100644
index 00000000..b9a7fa9f
--- /dev/null
+++ b/cmd/kyved/config.go
@@ -0,0 +1,32 @@
+package main
+
+import (
+	serverCfg "github.com/cosmos/cosmos-sdk/server/config"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	tmCfg "github.com/tendermint/tendermint/config"
+)
+
+func initAppConfig() (string, *serverCfg.Config) {
+	cfg := serverCfg.DefaultConfig()
+	cfg.MinGasPrices = "0.001tkyve"
+
+	return serverCfg.DefaultConfigTemplate, cfg
+}
+
+func 
initSDKConfig(accountAddressPrefix string) { + accountPubKeyPrefix := accountAddressPrefix + "pub" + validatorAddressPrefix := accountAddressPrefix + "valoper" + validatorPubKeyPrefix := accountAddressPrefix + "valoperpub" + consNodeAddressPrefix := accountAddressPrefix + "valcons" + consNodePubKeyPrefix := accountAddressPrefix + "valconspub" + + config := sdk.GetConfig() + config.SetBech32PrefixForAccount(accountAddressPrefix, accountPubKeyPrefix) + config.SetBech32PrefixForValidator(validatorAddressPrefix, validatorPubKeyPrefix) + config.SetBech32PrefixForConsensusNode(consNodeAddressPrefix, consNodePubKeyPrefix) + config.Seal() +} + +func initTendermintConfig() *tmCfg.Config { + return tmCfg.DefaultConfig() +} diff --git a/cmd/kyved/gen_accounts.go b/cmd/kyved/gen_accounts.go new file mode 100644 index 00000000..3b04d3d8 --- /dev/null +++ b/cmd/kyved/gen_accounts.go @@ -0,0 +1,199 @@ +// https://github.com/ignite/cli/blob/v0.25.1/ignite/pkg/cosmoscmd/genaccounts.go + +package main + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/server" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" + + // Auth + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + // Bank + bankTypes "github.com/cosmos/cosmos-sdk/x/bank/types" + // GenUtil + "github.com/cosmos/cosmos-sdk/x/genutil" + genUtilTypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + // Vesting + vestingTypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" +) + +const ( + flagVestingStart = "vesting-start-time" + flagVestingEnd = "vesting-end-time" + flagVestingAmt = "vesting-amount" +) + +// addGenesisAccountCmd returns add-genesis-account cobra Command. +func addGenesisAccountCmd(defaultNodeHome string) *cobra.Command { + cmd := &cobra.Command{ + Use: "add-genesis-account [address_or_key_name] [coin][,[coin]]", + Short: "Add a genesis account to genesis.json", + Long: `Add a genesis account to genesis.json. The provided account must specify +the account address or key name and a list of initial coins. If a key name is given, +the address will be looked up in the local Keybase. The list of initial tokens must +contain valid denominations. Accounts may optionally be supplied with vesting parameters. 
+`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + cdc := clientCtx.Codec + + serverCtx := server.GetServerContextFromCmd(cmd) + config := serverCtx.Config + + config.SetRoot(clientCtx.HomeDir) + + coins, err := sdk.ParseCoinsNormalized(args[1]) + if err != nil { + return fmt.Errorf("failed to parse coins: %w", err) + } + + addr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + inBuf := bufio.NewReader(cmd.InOrStdin()) + keyringBackend, err := cmd.Flags().GetString(flags.FlagKeyringBackend) + if err != nil { + return err + } + + // attempt to lookup address from Keybase if no address was provided + kb, err := keyring.New(sdk.KeyringServiceName(), keyringBackend, clientCtx.HomeDir, inBuf, cdc) + if err != nil { + return err + } + + info, err := kb.Key(args[0]) + if err != nil { + return fmt.Errorf("failed to get address from Keybase: %w", err) + } + + addr, err = info.GetAddress() + if err != nil { + return fmt.Errorf("failed to get address from Keybase: %w", err) + } + } + + vestingStart, err := cmd.Flags().GetInt64(flagVestingStart) + if err != nil { + return err + } + vestingEnd, err := cmd.Flags().GetInt64(flagVestingEnd) + if err != nil { + return err + } + vestingAmtStr, err := cmd.Flags().GetString(flagVestingAmt) + if err != nil { + return err + } + + vestingAmt, err := sdk.ParseCoinsNormalized(vestingAmtStr) + if err != nil { + return fmt.Errorf("failed to parse vesting amount: %w", err) + } + + // create concrete account type based on input parameters + var genAccount authTypes.GenesisAccount + + balances := bankTypes.Balance{Address: addr.String(), Coins: coins.Sort()} + baseAccount := authTypes.NewBaseAccount(addr, nil, 0, 0) + + if !vestingAmt.IsZero() { + baseVestingAccount := vestingTypes.NewBaseVestingAccount(baseAccount, vestingAmt.Sort(), vestingEnd) + + if (balances.Coins.IsZero() && !baseVestingAccount.OriginalVesting.IsZero()) || + baseVestingAccount.OriginalVesting.IsAnyGT(balances.Coins) { + return errors.New("vesting amount cannot be greater than total amount") + } + + switch { + case vestingStart != 0 && vestingEnd != 0: + genAccount = vestingTypes.NewContinuousVestingAccountRaw(baseVestingAccount, vestingStart) + + case vestingEnd != 0: + genAccount = vestingTypes.NewDelayedVestingAccountRaw(baseVestingAccount) + + default: + return errors.New("invalid vesting parameters; must supply start and end time or end time") + } + } else { + genAccount = baseAccount + } + + if err := genAccount.Validate(); err != nil { + return fmt.Errorf("failed to validate new genesis account: %w", err) + } + + genFile := config.GenesisFile() + appState, genDoc, err := genUtilTypes.GenesisStateFromGenFile(genFile) + if err != nil { + return fmt.Errorf("failed to unmarshal genesis state: %w", err) + } + + authGenState := authTypes.GetGenesisStateFromAppState(cdc, appState) + + accs, err := authTypes.UnpackAccounts(authGenState.Accounts) + if err != nil { + return fmt.Errorf("failed to get accounts from any: %w", err) + } + + if accs.Contains(addr) { + return fmt.Errorf("cannot add account at existing address %s", addr) + } + + // Add the new account to the set of genesis accounts and sanitize the + // accounts afterwards. 
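+			// Sanitizing keeps the genesis account list sorted by account number.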
+ accs = append(accs, genAccount) + accs = authTypes.SanitizeGenesisAccounts(accs) + + genAccs, err := authTypes.PackAccounts(accs) + if err != nil { + return fmt.Errorf("failed to convert accounts into any's: %w", err) + } + authGenState.Accounts = genAccs + + authGenStateBz, err := cdc.MarshalJSON(&authGenState) + if err != nil { + return fmt.Errorf("failed to marshal auth genesis state: %w", err) + } + + appState[authTypes.ModuleName] = authGenStateBz + + bankGenState := bankTypes.GetGenesisStateFromAppState(cdc, appState) + bankGenState.Balances = append(bankGenState.Balances, balances) + bankGenState.Balances = bankTypes.SanitizeGenesisBalances(bankGenState.Balances) + + bankGenStateBz, err := cdc.MarshalJSON(bankGenState) + if err != nil { + return fmt.Errorf("failed to marshal bank genesis state: %w", err) + } + + appState[bankTypes.ModuleName] = bankGenStateBz + + appStateJSON, err := json.Marshal(appState) + if err != nil { + return fmt.Errorf("failed to marshal application genesis state: %w", err) + } + + genDoc.AppState = appStateJSON + return genutil.ExportGenesisFile(genDoc, genFile) + }, + } + + cmd.Flags().String(flags.FlagKeyringBackend, flags.DefaultKeyringBackend, "Select keyring's backend (os|file|kwallet|pass|test)") + cmd.Flags().String(flags.FlagHome, defaultNodeHome, "The application home directory") + cmd.Flags().String(flagVestingAmt, "", "amount of coins for vesting accounts") + cmd.Flags().Int64(flagVestingStart, 0, "schedule start time (unix epoch) for vesting accounts") + cmd.Flags().Int64(flagVestingEnd, 0, "schedule end time (unix epoch) for vesting accounts") + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/cmd/kyved/main.go b/cmd/kyved/main.go new file mode 100644 index 00000000..e9d1952b --- /dev/null +++ b/cmd/kyved/main.go @@ -0,0 +1,16 @@ +package main + +import ( + "os" + + kyveApp "github.com/KYVENetwork/chain/app" + serverCmd "github.com/cosmos/cosmos-sdk/server/cmd" +) + +func main() { + initSDKConfig(kyveApp.AccountAddressPrefix) + rootCmd := NewRootCmd(kyveApp.MakeEncodingConfig()) + if err := serverCmd.Execute(rootCmd, "", kyveApp.DefaultNodeHome); err != nil { + os.Exit(1) + } +} diff --git a/cmd/kyved/root.go b/cmd/kyved/root.go new file mode 100644 index 00000000..a2070fed --- /dev/null +++ b/cmd/kyved/root.go @@ -0,0 +1,197 @@ +package main + +import ( + "fmt" + "os" + "strconv" + "time" + + kyveApp "github.com/KYVENetwork/chain/app" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + teamTypes "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/config" + "github.com/cosmos/cosmos-sdk/client/debug" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/keys" + "github.com/cosmos/cosmos-sdk/client/pruning" + "github.com/cosmos/cosmos-sdk/client/rpc" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/version" + "github.com/spf13/cobra" + tmCli "github.com/tendermint/tendermint/libs/cli" + + // Auth + authCli "github.com/cosmos/cosmos-sdk/x/auth/client/cli" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + // Bank + bankTypes "github.com/cosmos/cosmos-sdk/x/bank/types" + // Crisis + "github.com/cosmos/cosmos-sdk/x/crisis" + // GenUtil + genUtilCli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" +) + +// NewRootCmd creates a new root command for the KYVE chain daemon. +func NewRootCmd(encodingConfig kyveApp.EncodingConfig) *cobra.Command { + initClientCtx := client.Context{}. 
+		WithCodec(encodingConfig.Marshaler).
+		WithInterfaceRegistry(encodingConfig.InterfaceRegistry).
+		WithTxConfig(encodingConfig.TxConfig).
+		WithLegacyAmino(encodingConfig.Amino).
+		WithInput(os.Stdin).
+		WithAccountRetriever(authTypes.AccountRetriever{}).
+		WithBroadcastMode(flags.BroadcastBlock).
+		WithHomeDir(kyveApp.DefaultNodeHome).
+		WithViper("KYVE")
+
+	rootCmd := &cobra.Command{
+		Use:   "kyved",
+		Short: "KYVE Chain Daemon",
+		PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+			cmd.SetOut(cmd.OutOrStdout())
+			cmd.SetErr(cmd.ErrOrStderr())
+
+			initClientCtx, err := client.ReadPersistentCommandFlags(initClientCtx, cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			initClientCtx, err = config.ReadFromClientConfig(initClientCtx)
+			if err != nil {
+				return err
+			}
+
+			if err := client.SetCmdClientContextHandler(initClientCtx, cmd); err != nil {
+				return err
+			}
+
+			customAppTemplate, customAppConfig := initAppConfig()
+			customTMConfig := initTendermintConfig()
+
+			return server.InterceptConfigsPreRunHandler(cmd, customAppTemplate, customAppConfig, customTMConfig)
+		},
+	}
+
+	ac := appCreator{encodingConfig}
+	server.AddCommands(
+		rootCmd,
+		kyveApp.DefaultNodeHome,
+		ac.createApp,
+		ac.exportApp,
+		func(startCmd *cobra.Command) {
+			crisis.AddModuleInitFlags(startCmd)
+		},
+	)
+
+	rootCmd.AddCommand(
+		genUtilCli.InitCmd(kyveApp.ModuleBasics, kyveApp.DefaultNodeHome),
+		genUtilCli.CollectGenTxsCmd(bankTypes.GenesisBalancesIterator{}, kyveApp.DefaultNodeHome),
+		genUtilCli.MigrateGenesisCmd(),
+		genUtilCli.GenTxCmd(
+			kyveApp.ModuleBasics,
+			encodingConfig.TxConfig,
+			bankTypes.GenesisBalancesIterator{},
+			kyveApp.DefaultNodeHome,
+		),
+		infoCommand(),
+		genUtilCli.ValidateGenesisCmd(kyveApp.ModuleBasics),
+		addGenesisAccountCmd(kyveApp.DefaultNodeHome),
+		tmCli.NewCompletionCmd(rootCmd, true),
+		debug.Cmd(),
+		config.Cmd(),
+		pruning.PruningCmd(ac.createApp),
+
+		rpc.StatusCommand(),
+		queryCommand(),
+		txCommand(),
+		keys.Commands(kyveApp.DefaultNodeHome),
+	)
+
+	return rootCmd
+}
+
+func queryCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:                        "query",
+		Aliases:                    []string{"q"},
+		Short:                      "Querying subcommands",
+		DisableFlagParsing:         true,
+		SuggestionsMinimumDistance: 2,
+		RunE:                       client.ValidateCmd,
+	}
+
+	cmd.AddCommand(
+		rpc.BlockCommand(),
+		rpc.ValidatorCommand(),
+		authCli.GetAccountCmd(),
+		authCli.QueryTxCmd(),
+		authCli.QueryTxsByEventsCmd(),
+	)
+
+	kyveApp.ModuleBasics.AddQueryCommands(cmd)
+
+	cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
+
+	return cmd
+}
+
+func txCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:                        "tx",
+		Short:                      "Transactions subcommands",
+		DisableFlagParsing:         true,
+		SuggestionsMinimumDistance: 2,
+		RunE:                       client.ValidateCmd,
+	}
+
+	cmd.AddCommand(
+		authCli.GetSignCommand(),
+		authCli.GetSignBatchCommand(),
+		authCli.GetMultiSignCommand(),
+		authCli.GetValidateSignaturesCommand(),
+		authCli.GetBroadcastCommand(),
+		authCli.GetEncodeCommand(),
+		authCli.GetDecodeCommand(),
+	)
+
+	kyveApp.ModuleBasics.AddTxCommands(cmd)
+
+	cmd.PersistentFlags().String(flags.FlagChainID, "", "The network chain ID")
+
+	return cmd
+}
+
+func infoCommand() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "info",
+		Short: "Print information about build variables",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			fmt.Println("Information about build variables:")
+			fmt.Printf("Version: %s\n", version.Version)
+			fmt.Printf("Denom: %s\n", globalTypes.Denom)
+			fmt.Printf("Team-Authority: %s\n", teamTypes.AUTHORITY_ADDRESS)
+ fmt.Printf("Team-Allocation: %s\n", formatInt(teamTypes.TEAM_ALLOCATION)) + fmt.Printf("Team-TGE: %s\n", time.Unix(int64(teamTypes.TGE), 0).String()) + return nil + }, + } + + return cmd +} + +func formatInt(number uint64) string { + output := strconv.FormatUint(number, 10) + startOffset := 3 + + outputIndex := len(output) + if len(output) >= 6 { + outputIndex -= 6 + output = output[:outputIndex] + "." + output[outputIndex:] + for outputIndex > startOffset { + outputIndex -= 3 + output = output[:outputIndex] + "," + output[outputIndex:] + } + } + return output +} diff --git a/config.yml b/config.yml new file mode 100644 index 00000000..ba3ab4ac --- /dev/null +++ b/config.yml @@ -0,0 +1,170 @@ +version: 1 +build: + main: cmd/kyved + proto: + path: proto + third_party_paths: + - third_party/proto + - proto_vendor +accounts: + - name: dummy + mnemonic: expect crisp umbrella hospital firm exhibit future size slot update blood + deliver fat happy ghost visa recall usual path purity junior ring ordinary stove + - name: alice + coins: + - 2000000000000000000tkyve + mnemonic: worry grief loyal smoke pencil arrow trap focus high pioneer tomato hedgehog + essence purchase dove pond knee custom phone gentle sunset addict mother fabric + - name: bob + coins: + - 1000000000000000000tkyve + mnemonic: crash sick toilet stumble join cash erode glory door weird diagram away + lizard solid segment apple urge joy annual able tank define candy demise + - name: charlie + coins: + - 1000000000000000000tkyve + mnemonic: shoot inject fragile width trend satisfy army enact volcano crowd message + strike true divorce search rich office shoulder sport relax rhythm symbol gadget + size + - name: faucet + coins: + - 1000000000000000000tkyve + mnemonic: open home share route party lake room solution quantum pact model avocado + humor dilemma review desert turtle total humor produce rate keen eagle fee +# - name: team +# coins: +# - 165000000000000000tkyve +# address: kyve1e29j95xmsw3zmvtrk4st8e89z5n72v7nf70ma4 + - name: foundation + coins: + # for fees + - 1000000000tkyve + mnemonic: evidence taxi material cabin annual phrase practice unhappy safe jealous + section drink illness panther bread aim mouse dolphin deliver ready doll finger search cheap +# address: kyve1fd4qu868n7arav8vteghcppxxa0p2vna5f5ep8 + - name: foundation_inflation + coins: + # for fees + - 1000000000tkyve + mnemonic: hold urge sustain chef furnace foam oven fall harsh core anchor during crew secret + craft rhythm choose gold soft aisle sort phrase enter orphan +# address: kyve1tuxsc7ez79aydyee86qxm05zyx0l7d78c2zzsn + +faucet: + name: faucet + coins: + - 1000000000000000tkyve + host: 0.0.0.0:4500 +client: + openapi: + path: docs/swagger/swagger.yml +genesis: + app_state: + bank: + balances: + - address: "kyve1e29j95xmsw3zmvtrk4st8e89z5n72v7nf70ma4" + coins: + - denom: "tkyve" + amount: "165000000000000000" + delegation: + params: + unbonding_delegation_time: 10 + global: + params: + burn_ratio: "0.5" + min_gas_price: "1" + gov: + deposit_params: + max_deposit_period: 300s + min_deposit: + - amount: "50_000_000_000" + denom: tkyve + voting_params: + voting_period: 120s + mint: + params: + mint_denom: "tkyve" + team: + account_count: "8" + account_list: + - id: "0" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1686693600" + commencement: "1663106400" + - id: "1" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1749852000" + commencement: "1694642400" + - id: "2" + total_allocation: 
"1000000000000000" + unlocked_claimed: "0" + clawback: "1694642400" + commencement: "1615676400" + - id: "3" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1702508400" + commencement: "1647212400" + - id: "4" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1741906800" + commencement: "1663106400" + - id: "5" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1804978800" + commencement: "1710370800" + - id: "6" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1741906800" + commencement: "1615676400" + - id: "7" + total_allocation: "1000000000000000" + unlocked_claimed: "0" + clawback: "1804978800" + commencement: "1647212400" + pool: + pool_count: "1" + pool_list: + - config: "ar://tMTrJ8E3QgBNLz0-lyn6QrxasEIX46d14QFVmBWshSI" + current_compression_id: "1" + current_index: "0" + current_key: "" + current_storage_provider_id: "1" + current_summary: "" + funders: [] + id: "0" + logo: "ar://E3jXAOeJ3El7HQgOf_NtSOwVE7Sd0M4g4bYqjCBH9CU" + max_bundle_size: "100" + min_delegation: "100000000000" + name: "Avalanche // C-Chain" + operating_cost: "2500000000" + disabled: false + runtime: '@kyvejs/evm' + start_key: "0" + total_bundles: "0" + total_funds: "0" + upload_interval: "60" + protocol: + version: "1.0.0-beta.5" + binaries: "{\"kyve-macos-x64\":\"https://github.com/KYVENetwork/kyvejs/releases/download/%40kyvejs%2Fevm%401.0.0-beta.5/kyve-macos-x64.zip\"}" + last_upgrade: "0" + upgrade_plan: + version: "" + binaries: "" + scheduled_at: "0" + duration: "0" + stakers: + params: + commission_change_time: 10 + staking: + params: + bond_denom: tkyve + chain_id: kyve-local +validators: + - name: alice + bonded: 1000000000000000tkyve diff --git a/docs/config.json b/docs/config.json new file mode 100644 index 00000000..3ef0f89d --- /dev/null +++ b/docs/config.json @@ -0,0 +1,96 @@ +{ + "swagger": "2.0", + "info": { + "title": "KYVE Chain - REST API", + "description": "REST interface for query and transaction services", + "version": "1.0.0" + }, + "apis": [ + { + "url": "./tmp-swagger-gen/kyve/bundles/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "BundlesParams" + } + }, + "tags": { + "rename": { + "Query": "QueryBundles" + } + } + }, + { + "url": "./tmp-swagger-gen/kyve/delegation/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "DelegationParams" + } + }, + "tags": { + "rename": { + "Query": "QueryDelegation" + } + } + }, + { + "url": "./tmp-swagger-gen/kyve/global/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "GlobalParams" + } + }, + "tags": { + "rename": { + "Query": "QueryGlobal" + } + } + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/account.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/bundles.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/delegation.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/params.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/pools.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/query.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/query/v1beta1/stakers.swagger.json" + }, + { + "url": "./tmp-swagger-gen/kyve/stakers/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "StakersParams" + } + }, + "tags": { + "rename": { + "Query": "QueryStakers" + } + } + }, + { + "url": "./tmp-swagger-gen/kyve/team/v1beta1/query.swagger.json", + 
"operationIds": { + "rename": { + "Params": "TeamParams" + } + }, + "tags": { + "rename": { + "Query": "QueryTeam" + } + } + } + ] +} diff --git a/docs/docs.go b/docs/docs.go new file mode 100644 index 00000000..18b0f922 --- /dev/null +++ b/docs/docs.go @@ -0,0 +1,8 @@ +package docs + +import "embed" + +// Swagger is the data of the swagger page generated by protobuf +// +//go:embed swagger.yml +var Swagger embed.FS diff --git a/docs/handler.go b/docs/handler.go new file mode 100644 index 00000000..763b1969 --- /dev/null +++ b/docs/handler.go @@ -0,0 +1,27 @@ +// https://github.com/ignite/cli/blob/v0.25.1/ignite/pkg/openapiconsole/console.go + +package docs + +import ( + "embed" + "html/template" + "net/http" +) + +//go:embed index.tpl +var index embed.FS + +// Handler returns a http handler that servers OpenAPI console for an OpenAPI spec at specURL. +func Handler(title, specURL string) http.HandlerFunc { + t, _ := template.ParseFS(index, "index.tpl") + + return func(w http.ResponseWriter, req *http.Request) { + _ = t.Execute(w, struct { + Title string + URL string + }{ + title, + specURL, + }) + } +} diff --git a/docs/index.tpl b/docs/index.tpl new file mode 100644 index 00000000..8ec84e9e --- /dev/null +++ b/docs/index.tpl @@ -0,0 +1,24 @@ + + + + + {{ .Title }} + + + + +
+ + + + + diff --git a/docs/swagger.yml b/docs/swagger.yml new file mode 100644 index 00000000..09b5856a --- /dev/null +++ b/docs/swagger.yml @@ -0,0 +1,8463 @@ +swagger: '2.0' +info: + title: KYVE Chain - REST API + description: REST interface for query and transaction services + version: 1.0.0 +paths: + /kyve/bundles/v1beta1/params: + get: + summary: Parameters queries the parameters of the module. + operationId: BundlesParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + upload_timeout: + type: string + format: uint64 + description: upload_timeout ... + storage_cost: + type: string + format: uint64 + description: storage_cost ... + network_fee: + type: string + description: network_fee ... + max_points: + type: string + format: uint64 + description: max_points ... + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - QueryBundles + /kyve/delegation/v1beta1/params: + get: + summary: Parameters queries the parameters of the module. + operationId: DelegationParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + unbonding_delegation_time: + type: string + format: uint64 + description: unbonding_delegation_time ... + redelegation_cooldown: + type: string + format: uint64 + description: unbonding_delegation_time ... + redelegation_max_amount: + type: string + format: uint64 + description: unbonding_delegation_time ... + vote_slash: + type: string + description: vote_slash ... + upload_slash: + type: string + description: upload_slash ... + timeout_slash: + type: string + description: timeout_slash ... + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - QueryDelegation + /kyve/global/v1beta1/params: + get: + summary: Parameters queries the parameters of the module. + operationId: GlobalParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + min_gas_price: + type: string + description: >- + min_gas_price defines the minimum gas price value for all + transactions. + burn_ratio: + type: string + description: burn_ratio defines the ratio of transaction fees burnt. 
+ gas_adjustments: + type: array + items: + type: object + properties: + type: + type: string + title: type of the sdk-message + amount: + type: string + format: uint64 + title: amount of gas which is added to the message + title: >- + GasAdjustment stores for every message type a fixed + amount + + of gas which is added to the message + description: >- + gas_adjustments can add a constant amount of gas to a + specific message type. + + This gives more control to make certain messages more + expensive to avoid spamming + + of certain types of messages. + gas_refunds: + type: array + items: + type: object + properties: + type: + type: string + title: type of the sdk-message + fraction: + type: string + title: fraction in decimal representation between 0 and 1 + description: >- + GasRefund stores the fraction of gas which will be + refunded for a given + + type of message. + + This only works if the transaction only includes one + message. + description: >- + gas_refunds lets the governance specify a fraction of how + much gas + + a user gets refunded for a certain type of transaction. + + This could be used to make transactions which support to + network cheaper. + + Gas refunds only work if the transaction only included one + message. + min_initial_deposit_ratio: + type: string + description: >- + min_initial_deposit_ratio sets a minimum fraction of + initial deposit for a + + governance proposal. This is used to avoid spamming of + proposals and + + polluting the proposals page. + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - QueryGlobal + /kyve/query/v1beta1/account_assets/{address}: + get: + summary: >- + AccountAssets returns an overview of the sum of all balances for a given + user. e.g. balance, staking, funding, etc. + operationId: AccountAssets + responses: + '200': + description: A successful response. + schema: + type: object + properties: + balance: + type: string + format: uint64 + description: balance ... + protocol_self_delegation: + type: string + format: uint64 + description: protocol_staking ... + protocol_self_delegation_unbonding: + type: string + format: uint64 + title: protocol_staking_unbonding + protocol_delegation: + type: string + format: uint64 + description: protocol_delegation ... + protocol_delegation_unbonding: + type: string + format: uint64 + title: protocol_delegation_unbonding + protocol_rewards: + type: string + format: uint64 + description: protocol_rewards ... + protocol_funding: + type: string + format: uint64 + description: protocol_funding ... + description: >- + QueryAccountAssetsResponse is the response type for the + Query/AccountAssets RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address ... 
+ in: path + required: true + type: string + tags: + - QueryAccount + /kyve/query/v1beta1/account_delegation_unbondings/{address}: + get: + summary: AccountDelegationUnbondings ... + operationId: AccountDelegationUnbondings + responses: + '200': + description: A successful response. + schema: + type: object + properties: + unbondings: + type: array + items: + type: object + properties: + amount: + type: string + format: uint64 + title: amount + creation_time: + type: string + format: uint64 + title: creation_time + staker: + title: staker + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. + type: object + properties: + commission: + type: string + title: >- + commission is the percentage of the rewards that + will + + get transferred to the staker before the + remaining + + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: >- + website is a https-link to the website of the + staker + logo: + type: string + title: >- + logo is a link to an image file (like jpg or + png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker + plans + + to change its commission. Delegators will see a + warning in + + the UI. A Commission change takes some time + until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: >- + creation_date is the UNIX-timestamp (in + seconds) + + of when the entry was created. + title: >- + CommissionChangeEntry shows when the old + commission + + of a staker will change to the new commission + title: >- + StakerMetadata contains static information for a + staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is + currently unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the + staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked + with the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: >- + pool contains useful information about the + pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. 
+ logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles + get created + total_funds: + type: string + format: uint64 + title: >- + total_funds of the pool. If the pool runs + + out of funds no more bundles will be + produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. + After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: >- + is_leaving indicates if a user has scheduled a + + a PoolLeave entry. After the leave-time is + over + + the staker will no longer participate in that + pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized + to vote + + and submit bundles. If the server gets + compromised + + the staker can just change the valaddress. + balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional + funds to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + description: >- + QueryAccountAssetsResponse is the response type for the + Query/AccountAssets RPC method. + description: balance ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAccountAssetsResponse is the response type for the + Query/AccountAssets RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address ... + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. 
+ in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - QueryAccount + /kyve/query/v1beta1/account_funded_list/{address}: + get: + summary: AccountFundedList returns all pools the given user has funded into. + operationId: AccountFundedList + responses: + '200': + description: A successful response. + schema: + type: object + properties: + funded: + type: array + items: + type: object + properties: + amount: + type: string + format: uint64 + description: amount ... + pool: + description: pool ... + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each bundle + reward + upload_interval: + type: string + format: uint64 + title: upload_interval is the interval bundles get created + total_funds: + type: string + format: uint64 + title: |- + total_funds of the pool. If the pool runs + out of funds no more bundles will be produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + title: >- + BasicPool contains the necessary properties need for a + pool + + to be displayed in the UI + description: Funded ... + description: funded ... + description: >- + QueryAccountFundedListResponse is the response type for the + Query/AccountFundedList RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). 
The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address ... + in: path + required: true + type: string + tags: + - QueryAccount + /kyve/query/v1beta1/account_redelegation/{address}: + get: + summary: AccountRedelegation ... 
+ operationId: AccountRedelegation + responses: + '200': + description: A successful response. + schema: + type: object + properties: + redelegation_cooldown_entries: + type: array + items: + type: object + properties: + creation_date: + type: string + format: uint64 + description: creation_date ... + finish_date: + type: string + format: uint64 + description: finish_date ... + description: RedelegationEntry ... + description: redelegation_cooldown_entries ... + available_slots: + type: string + format: uint64 + description: availableSlots ... + description: >- + QueryAccountDelegationListRequest is the response type for the + Query/AccountDelegationList RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address ... + in: path + required: true + type: string + tags: + - QueryAccount + /kyve/query/v1beta1/can_propose/{pool_id}/{staker}/{proposer}/{from_index}: + get: + summary: CanPropose ... + operationId: CanPropose + responses: + '200': + description: A successful response. + schema: + type: object + properties: + possible: + type: boolean + description: possible ... + reason: + type: string + description: reason ... + description: >- + QueryCanProposeResponse is the response type for the + Query/CanPropose RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. 
+ description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id defines the unique ID of the pool. + in: path + required: true + type: string + format: uint64 + - name: staker + description: staker ... + in: path + required: true + type: string + - name: proposer + description: proposer ... + in: path + required: true + type: string + - name: from_index + description: from_index ... + in: path + required: true + type: string + format: uint64 + tags: + - QueryBundles + /kyve/query/v1beta1/can_validate/{pool_id}/{valaddress}: + get: + summary: CanValidate ... + operationId: CanValidate + responses: + '200': + description: A successful response. + schema: + type: object + properties: + possible: + type: boolean + description: possible ... + reason: + type: string + description: reason ... + description: >- + QueryCanProposeResponse is the response type for the + Query/CanPropose RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id defines the unique ID of the pool. + in: path + required: true + type: string + format: uint64 + - name: valaddress + description: valaddress ... + in: path + required: true + type: string + tags: + - QueryBundles + /kyve/query/v1beta1/can_vote/{pool_id}/{staker}/{voter}/{storage_id}: + get: + summary: CanVote checks if voter on pool can still vote for the given bundle + operationId: CanVote + responses: + '200': + description: A successful response. + schema: + type: object + properties: + possible: + type: boolean + description: possible ... + reason: + type: string + description: reason ... + description: >- + QueryCanVoteResponse is the response type for the Query/CanVote + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id defines the unique ID of the pool. + in: path + required: true + type: string + format: uint64 + - name: staker + description: staker ... + in: path + required: true + type: string + - name: voter + description: voter ... + in: path + required: true + type: string + - name: storage_id + description: storage_id ... + in: path + required: true + type: string + tags: + - QueryBundles + /kyve/query/v1beta1/current_vote_status/{pool_id}: + get: + summary: CurrentVoteStatus ... + operationId: CurrentVoteStatus + responses: + '200': + description: A successful response. + schema: + type: object + properties: + valid: + type: string + format: uint64 + description: valid ... + invalid: + type: string + format: uint64 + description: invalid ... + abstain: + type: string + format: uint64 + description: abstain ... + total: + type: string + format: uint64 + description: total ... + description: >- + QueryCurrentVoteStatusResponse is the response type for the + Query/Staker RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. 
+ * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id ... + in: path + required: true + type: string + format: uint64 + tags: + - QueryBundles + /kyve/query/v1beta1/finalized_bundle/{pool_id}/{id}: + get: + summary: FinalizedBundle ... + operationId: FinalizedBundle + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_bundle: + description: finalized_bundle ... 
+ type: object + properties: + pool_id: + type: string + format: uint64 + title: >- + pool_id is the id of the pool for which this proposal is + for + id: + type: string + format: uint64 + title: >- + id is a unique identifier for each finalized bundle in a + pool + storage_id: + type: string + title: >- + storage_id is the id with which the data can be retrieved + from + uploader: + type: string + title: >- + uploader is the address of the staker who submitted this + bundle + from_index: + type: string + format: uint64 + title: >- + from_index is the index from where the bundle starts + (inclusive) + to_index: + type: string + format: uint64 + title: to_index is the index to which the bundle goes (exclusive) + to_key: + type: string + title: >- + to_key the key of the last data item in the bundle + proposal + bundle_summary: + type: string + title: bundle_summary a string summary of the current proposal + data_hash: + type: string + title: data_hash a sha256 hash of the raw compressed data + finalized_at: + type: string + format: uint64 + title: >- + finalized_at is the block height at which this bundle got + finalized + from_key: + type: string + title: >- + from_key the key of the first data item in the bundle + proposal + storage_provider_id: + type: integer + format: int64 + title: >- + storage_provider_id the id of the storage provider where + the bundle is stored + compression_id: + type: integer + format: int64 + title: >- + compression_id the id of the compression type with which + the data was compressed + title: >- + FinalizedBundle represents a bundle proposal where the + majority + + agreed on its validity + description: >- + QueryFinalizedBundleResponse is the response type for the + Query/Staker RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id ... + in: path + required: true + type: string + format: uint64 + - name: id + description: id ... + in: path + required: true + type: string + format: uint64 + tags: + - QueryBundles + /kyve/query/v1beta1/finalized_bundle_by_height/{pool_id}/{height}: + get: + summary: Queries the bundle which contains the data given height + operationId: FinalizedBundlesByHeight + responses: + '200': + description: A successful response. + schema: + type: object + properties: + finalized_bundle: + description: finalized_bundle ... 
+ type: object + properties: + pool_id: + type: string + format: uint64 + title: >- + pool_id is the id of the pool for which this proposal is + for + id: + type: string + format: uint64 + title: >- + id is a unique identifier for each finalized bundle in a + pool + storage_id: + type: string + title: >- + storage_id is the id with which the data can be retrieved + from + uploader: + type: string + title: >- + uploader is the address of the staker who submitted this + bundle + from_index: + type: string + format: uint64 + title: >- + from_index is the index from where the bundle starts + (inclusive) + to_index: + type: string + format: uint64 + title: to_index is the index to which the bundle goes (exclusive) + to_key: + type: string + title: >- + to_key the key of the last data item in the bundle + proposal + bundle_summary: + type: string + title: bundle_summary a string summary of the current proposal + data_hash: + type: string + title: data_hash a sha256 hash of the raw compressed data + finalized_at: + type: string + format: uint64 + title: >- + finalized_at is the block height at which this bundle got + finalized + from_key: + type: string + title: >- + from_key the key of the first data item in the bundle + proposal + storage_provider_id: + type: integer + format: int64 + title: >- + storage_provider_id the id of the storage provider where + the bundle is stored + compression_id: + type: integer + format: int64 + title: >- + compression_id the id of the compression type with which + the data was compressed + title: >- + FinalizedBundle represents a bundle proposal where the + majority + + agreed on its validity + description: >- + QueryFinalizedBundleResponse is the response type for the + Query/Staker RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id ... + in: path + required: true + type: string + format: uint64 + - name: height + description: id ... + in: path + required: true + type: string + format: uint64 + tags: + - QueryBundles + /kyve/query/v1beta1/finalized_bundles/{pool_id}: + get: + summary: FinalizedBundles ... + operationId: FinalizedBundles + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + finalized_bundles: + type: array + items: + type: object + properties: + pool_id: + type: string + format: uint64 + title: >- + pool_id is the id of the pool for which this proposal is + for + id: + type: string + format: uint64 + title: >- + id is a unique identifier for each finalized bundle in a + pool + storage_id: + type: string + title: >- + storage_id is the id with which the data can be + retrieved from + uploader: + type: string + title: >- + uploader is the address of the staker who submitted this + bundle + from_index: + type: string + format: uint64 + title: >- + from_index is the index from where the bundle starts + (inclusive) + to_index: + type: string + format: uint64 + title: >- + to_index is the index to which the bundle goes + (exclusive) + to_key: + type: string + title: >- + to_key the key of the last data item in the bundle + proposal + bundle_summary: + type: string + title: bundle_summary a string summary of the current proposal + data_hash: + type: string + title: data_hash a sha256 hash of the raw compressed data + finalized_at: + type: string + format: uint64 + title: >- + finalized_at is the block height at which this bundle + got finalized + from_key: + type: string + title: >- + from_key the key of the first data item in the bundle + proposal + storage_provider_id: + type: integer + format: int64 + title: >- + storage_provider_id the id of the storage provider where + the bundle is stored + compression_id: + type: integer + format: int64 + title: >- + compression_id the id of the compression type with which + the data was compressed + title: >- + FinalizedBundle represents a bundle proposal where the + majority + + agreed on its validity + description: finalized_bundles ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryStakersByPoolResponse is the response type for the + Query/Staker RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. 
+ * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id ... + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. 
+ + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - QueryBundles + /kyve/query/v1beta1/delegator/{staker}/{delegator}: + get: + summary: >- + Delegator returns delegation information for a specific delegator of a + specific staker. + operationId: Delegator + responses: + '200': + description: A successful response. + schema: + type: object + properties: + delegator: + description: delegator ... + type: object + properties: + delegator: + type: string + description: delegator ... + current_reward: + type: string + format: uint64 + description: current_reward ... + delegation_amount: + type: string + format: uint64 + description: delegation_amount ... + staker: + type: string + description: staker ... + description: >- + QueryDelegatorResponse is the response type for the + Query/Delegator RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: staker + description: staker ... + in: path + required: true + type: string + - name: delegator + description: delegator ... + in: path + required: true + type: string + tags: + - QueryDelegation + /kyve/query/v1beta1/delegators_by_staker/{staker}: + get: + summary: >- + DelegatorsByStaker returns all delegators that have delegated to the + given staker + + This query is paginated. + operationId: DelegatorsByStaker + responses: + '200': + description: A successful response. + schema: + type: object + properties: + delegators: + type: array + items: + type: object + properties: + delegator: + type: string + description: delegator ... + current_reward: + type: string + format: uint64 + description: current_reward ... + delegation_amount: + type: string + format: uint64 + description: delegation_amount ... + staker: + type: string + description: staker ... + description: StakerDelegatorResponse ... + description: delegators ... + total_delegation: + type: string + format: uint64 + title: total_delegation ... (consider metadata object) + total_delegator_count: + type: string + format: uint64 + description: total_delegation ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: QueryDelegatorsByStakerResponse ... + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". 
+ + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: staker + description: staker ... + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - QueryDelegation + /kyve/query/v1beta1/stakers_by_delegator/{delegator}: + get: + summary: >- + StakersByPoolAndDelegator returns all stakers the given delegator has + delegated to. + + This query is paginated. + operationId: StakersByDelegator + responses: + '200': + description: A successful response. + schema: + type: object + properties: + delegator: + type: string + description: delegator ... + stakers: + type: array + items: + type: object + properties: + staker: + description: staker ... + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. + type: object + properties: + commission: + type: string + title: >- + commission is the percentage of the rewards that + will + + get transferred to the staker before the + remaining + + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: >- + website is a https-link to the website of the + staker + logo: + type: string + title: >- + logo is a link to an image file (like jpg or + png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker + plans + + to change its commission. Delegators will see a + warning in + + the UI. 
A Commission change takes some time + until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: >- + creation_date is the UNIX-timestamp (in + seconds) + + of when the entry was created. + title: >- + CommissionChangeEntry shows when the old + commission + + of a staker will change to the new commission + title: >- + StakerMetadata contains static information for a + staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is + currently unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the + staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked + with the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: >- + pool contains useful information about the + pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles + get created + total_funds: + type: string + format: uint64 + title: >- + total_funds of the pool. If the pool runs + + out of funds no more bundles will be + produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. + After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: >- + is_leaving indicates if a user has scheduled a + + a PoolLeave entry. After the leave-time is + over + + the staker will no longer participate in that + pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized + to vote + + and submit bundles. If the server gets + compromised + + the staker can just change the valaddress. 
+ balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional + funds to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + title: >- + FullStaker aggregates information from the staker and + its delegators + + as well as pending queue entries into one object. + + It contains almost all needed information for a + convenient usage + current_reward: + type: string + format: uint64 + description: current_reward ... + delegation_amount: + type: string + format: uint64 + description: delegation_amount ... + description: DelegationForStakerResponse ... + description: stakers ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: QueryStakersByDelegatorResponse ... + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+
+
+                    Protobuf library provides support to pack/unpack Any values
+                    in the form
+
+                    of utility functions or additional generated methods of the
+                    Any type.
+
+
+                    Example 1: Pack and unpack a message in C++.
+
+                        Foo foo = ...;
+                        Any any;
+                        any.PackFrom(foo);
+                        ...
+                        if (any.UnpackTo(&foo)) {
+                          ...
+                        }
+
+                    Example 2: Pack and unpack a message in Java.
+
+                        Foo foo = ...;
+                        Any any = Any.pack(foo);
+                        ...
+                        if (any.is(Foo.class)) {
+                          foo = any.unpack(Foo.class);
+                        }
+
+                    Example 3: Pack and unpack a message in Python.
+
+                        foo = Foo(...)
+                        any = Any()
+                        any.Pack(foo)
+                        ...
+                        if any.Is(Foo.DESCRIPTOR):
+                          any.Unpack(foo)
+                          ...
+
+                    Example 4: Pack and unpack a message in Go
+
+                         foo := &pb.Foo{...}
+                         any, err := anypb.New(foo)
+                         if err != nil {
+                           ...
+                         }
+                         ...
+                         foo := &pb.Foo{}
+                         if err := any.UnmarshalTo(foo); err != nil {
+                           ...
+                         }
+
+                    The pack methods provided by protobuf library will by
+                    default use
+
+                    'type.googleapis.com/full.type.name' as the type URL and the
+                    unpack
+
+                    methods only use the fully qualified type name after the
+                    last '/'
+
+                    in the type URL, for example "foo.bar.com/x/y.z" will yield
+                    type
+
+                    name "y.z".
+
+
+
+                    JSON
+
+
+                    The JSON representation of an `Any` value uses the regular
+
+                    representation of the deserialized, embedded message, with
+                    an
+
+                    additional field `@type` which contains the type URL.
+                    Example:
+
+                        package google.profile;
+                        message Person {
+                          string first_name = 1;
+                          string last_name = 2;
+                        }
+
+                        {
+                          "@type": "type.googleapis.com/google.profile.Person",
+                          "firstName": ,
+                          "lastName": 
+                        }
+
+                    If the embedded message type is well-known and has a custom
+                    JSON
+
+                    representation, that representation will be embedded adding
+                    a field
+
+                    `value` which holds the custom JSON in addition to the
+                    `@type`
+
+                    field. Example (for message [google.protobuf.Duration][]):
+
+                        {
+                          "@type": "type.googleapis.com/google.protobuf.Duration",
+                          "value": "1.212s"
+                        }
+      parameters:
+        - name: delegator
+          description: delegator ...
+          in: path
+          required: true
+          type: string
+        - name: pagination.key
+          description: |-
+            key is a value returned in PageResponse.next_key to begin
+            querying the next page most efficiently. Only one of offset or key
+            should be set.
+          in: query
+          required: false
+          type: string
+          format: byte
+        - name: pagination.offset
+          description: >-
+            offset is a numeric offset that can be used when key is unavailable.
+
+            It is less efficient than using key. Only one of offset or key
+            should
+
+            be set.
+          in: query
+          required: false
+          type: string
+          format: uint64
+        - name: pagination.limit
+          description: >-
+            limit is the total number of results to be returned in the result
+            page.
+
+            If left empty it will default to a value to be set by each app.
+          in: query
+          required: false
+          type: string
+          format: uint64
+        - name: pagination.count_total
+          description: >-
+            count_total is set to true to indicate that the result set should
+            include
+
+            a count of the total number of items available for pagination in
+            UIs.
+
+            count_total is only respected when offset is used. It is ignored
+            when key
+
+            is set.
+          in: query
+          required: false
+          type: boolean
+        - name: pagination.reverse
+          description: >-
+            reverse is set to true if results are to be returned in the
+            descending order.
+
+
+            Since: cosmos-sdk 0.43
+          in: query
+          required: false
+          type: boolean
+      tags:
+        - QueryDelegation
+  /kyve/query/v1beta1/params:
+    get:
+      summary: Params queries the parameters of all KYVE modules.
+      operationId: Params
+      responses:
+        '200':
+          description: A successful response.
+          schema:
+            type: object
+            properties:
+              bundles_params:
+                description: bundles_params ...
+                type: object
+                properties:
+                  upload_timeout:
+                    type: string
+                    format: uint64
+                    description: upload_timeout ...
+                  storage_cost:
+                    type: string
+                    format: uint64
+                    description: storage_cost ...
+                  network_fee:
+                    type: string
+                    description: network_fee ...
+                  max_points:
+                    type: string
+                    format: uint64
+                    description: max_points ...
+              delegation_params:
+                description: delegation_params ...
+                type: object
+                properties:
+                  unbonding_delegation_time:
+                    type: string
+                    format: uint64
+                    description: unbonding_delegation_time ...
+                  redelegation_cooldown:
+                    type: string
+                    format: uint64
+                    description: redelegation_cooldown ...
+                  redelegation_max_amount:
+                    type: string
+                    format: uint64
+                    description: redelegation_max_amount ...
+                  vote_slash:
+                    type: string
+                    description: vote_slash ...
+                  upload_slash:
+                    type: string
+                    description: upload_slash ...
+                  timeout_slash:
+                    type: string
+                    description: timeout_slash ...
+              global_params:
+                description: global_params ...
+                type: object
+                properties:
+                  min_gas_price:
+                    type: string
+                    description: >-
+                      min_gas_price defines the minimum gas price value for all
+                      transactions.
+                  burn_ratio:
+                    type: string
+                    description: burn_ratio defines the ratio of transaction fees burnt.
+                  gas_adjustments:
+                    type: array
+                    items:
+                      type: object
+                      properties:
+                        type:
+                          type: string
+                          title: type of the sdk-message
+                        amount:
+                          type: string
+                          format: uint64
+                          title: amount of gas which is added to the message
+                      title: >-
+                        GasAdjustment stores for every message type a fixed
+                        amount
+
+                        of gas which is added to the message
+                    description: >-
+                      gas_adjustments can add a constant amount of gas to a
+                      specific message type.
+
+                      This gives more control to make certain messages more
+                      expensive to avoid spamming
+
+                      of certain types of messages.
+                  gas_refunds:
+                    type: array
+                    items:
+                      type: object
+                      properties:
+                        type:
+                          type: string
+                          title: type of the sdk-message
+                        fraction:
+                          type: string
+                          title: fraction in decimal representation between 0 and 1
+                      description: >-
+                        GasRefund stores the fraction of gas which will be
+                        refunded for a given
+
+                        type of message.
+
+                        This only works if the transaction only includes one
+                        message.
+                    description: >-
+                      gas_refunds lets the governance specify a fraction of how
+                      much gas
+
+                      a user gets refunded for a certain type of transaction.
+
+                      This could be used to make transactions which support the
+                      network cheaper.
+
+                      Gas refunds only work if the transaction only includes one
+                      message.
+                  min_initial_deposit_ratio:
+                    type: string
+                    description: >-
+                      min_initial_deposit_ratio sets a minimum fraction of
+                      initial deposit for a
+
+                      governance proposal. This is used to avoid spamming of
+                      proposals and
+
+                      polluting the proposals page.
+              gov_params:
+                description: gov_params ...
+                type: object
+                properties:
+                  voting_params:
+                    description: voting_params defines the parameters related to voting.
+                    type: object
+                    properties:
+                      voting_period:
+                        type: string
+                        description: Length of the voting period.
+                  deposit_params:
+                    description: deposit_params defines the parameters related to deposit.
+                    type: object
+                    properties:
+                      min_deposit:
+                        type: array
+                        items:
+                          type: object
+                          properties:
+                            denom:
+                              type: string
+                            amount:
+                              type: string
+                          description: >-
+                            Coin defines a token with a denomination and an
+                            amount.
+
+
+                            NOTE: The amount field is an Int which implements
+                            the custom method
+
+                            signatures required by gogoproto.
+                        description: Minimum deposit for a proposal to enter voting period.
+                      max_deposit_period:
+                        type: string
+                        description: >-
+                          Maximum period for Atom holders to deposit on a
+                          proposal. Initial value: 2
+                          months.
+                  tally_params:
+                    description: tally_params defines the parameters related to tally.
+                    type: object
+                    properties:
+                      quorum:
+                        type: string
+                        description: >-
+                          Minimum percentage of total stake needed to vote for a
+                          result to be
+                          considered valid.
+                      threshold:
+                        type: string
+                        description: >-
+                          Minimum proportion of Yes votes for proposal to pass.
+                          Default value: 0.5.
+                      veto_threshold:
+                        type: string
+                        description: >-
+                          Minimum value of Veto votes to Total votes ratio for
+                          proposal to be
+                          vetoed. Default value: 1/3.
+              stakers_params:
+                description: stakers_params ...
+                type: object
+                properties:
+                  commission_change_time:
+                    type: string
+                    format: uint64
+                    description: commission_change_time ...
+                  leave_pool_time:
+                    type: string
+                    format: uint64
+                    description: leave_pool_time ...
+            description: QueryParamsResponse ...
+        default:
+          description: An unexpected error response.
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+              code:
+                type: integer
+                format: int32
+              message:
+                type: string
+              details:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    type_url:
+                      type: string
+                      description: >-
+                        A URL/resource name that uniquely identifies the type of
+                        the serialized
+
+                        protocol buffer message. This string must contain at
+                        least
+
+                        one "/" character. The last segment of the URL's path
+                        must represent
+
+                        the fully qualified name of the type (as in
+
+                        `path/google.protobuf.Duration`). The name should be in
+                        a canonical form
+
+                        (e.g., leading "." is not accepted).
+
+
+                        In practice, teams usually precompile into the binary
+                        all types that they
+
+                        expect it to use in the context of Any. However, for
+                        URLs which use the
+
+                        scheme `http`, `https`, or no scheme, one can optionally
+                        set up a type
+
+                        server that maps type URLs to message definitions as
+                        follows:
+
+
+                        * If no scheme is provided, `https` is assumed.
+
+                        * An HTTP GET on the URL must yield a
+                        [google.protobuf.Type][]
+                          value in binary format, or produce an error.
+                        * Applications are allowed to cache lookup results based
+                        on the
+                          URL, or have them precompiled into a binary to avoid any
+                          lookup. Therefore, binary compatibility needs to be preserved
+                          on changes to types. (Use versioned type names to manage
+                          breaking changes.)
+
+                        Note: this functionality is not currently available in
+                        the official
+
+                        protobuf release, and it is not used for type URLs
+                        beginning with
+
+                        type.googleapis.com.
+
+
+                        Schemes other than `http`, `https` (or the empty scheme)
+                        might be
+
+                        used with implementation specific semantics.
+                    value:
+                      type: string
+                      format: byte
+                      description: >-
+                        Must be a valid serialized protocol buffer of the above
+                        specified type.
+                  description: >-
+                    `Any` contains an arbitrary serialized protocol buffer
+                    message along with a
+
+                    URL that describes the type of the serialized message.
+
+
+                    Protobuf library provides support to pack/unpack Any values
+                    in the form
+
+                    of utility functions or additional generated methods of the
+                    Any type.
+
+
+                    Example 1: Pack and unpack a message in C++.
+
+                        Foo foo = ...;
+                        Any any;
+                        any.PackFrom(foo);
+                        ...
+                        if (any.UnpackTo(&foo)) {
+                          ...
+                        }
+
+                    Example 2: Pack and unpack a message in Java.
+
+                        Foo foo = ...;
+                        Any any = Any.pack(foo);
+                        ...
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - QueryParams + /kyve/query/v1beta1/pool/{id}: + get: + summary: Pool queries a pool by its Id. + operationId: Pool + responses: + '200': + description: A successful response. + schema: + type: object + properties: + pool: + description: pool ... + type: object + properties: + id: + type: string + format: uint64 + description: id ... + data: + description: data ... + type: object + properties: + id: + type: string + format: uint64 + title: id - unique identifier of the pool, can not be changed + name: + type: string + title: name is a human readable name for the pool + runtime: + type: string + title: >- + runtime specified which protocol and which version + needs is required + logo: + type: string + title: logo is a link to an image file + config: + type: string + description: >- + config is either a JSON encoded string or a link to an + external storage provider. + + This is up to the implementation of the protocol node. + start_key: + type: string + description: start_key ... + current_key: + type: string + description: current_key ... + current_summary: + type: string + description: current_summary ... + current_index: + type: string + format: uint64 + description: current_index ... + total_bundles: + type: string + format: uint64 + title: total_bundles is the number of total finalized bundles + upload_interval: + type: string + format: uint64 + description: upload_interval ... + operating_cost: + type: string + format: uint64 + description: operating_cost ... + min_delegation: + type: string + format: uint64 + description: min_delegation ... + max_bundle_size: + type: string + format: uint64 + description: max_bundle_size ... + disabled: + type: boolean + description: |- + disabled is true when the pool is disabled. + Can only be done via governance. 
+ funders: + type: array + items: + type: object + properties: + address: + type: string + title: address is the address of the funder + amount: + type: string + format: uint64 + title: >- + amount is the current amount of funds in ukyve + the funder has + + still funded the pool with + title: >- + Funder is the object which holds info about a single + pool funder + description: funders ... + total_funds: + type: string + format: uint64 + description: total_funds ... + protocol: + description: protocol ... + type: object + properties: + version: + type: string + title: >- + version holds the current software version tag of + the pool binaries + binaries: + type: string + title: >- + binaries is a stringified json object which holds + binaries in the + + current version for multiple platforms and + architectures + last_upgrade: + type: string + format: uint64 + title: >- + last_upgrade is the unix time the pool was + upgraded the last time + title: >- + Protocol holds all info about the current pool version + and the + + available binaries for participating as a validator in + a pool + upgrade_plan: + description: upgrade_plan ... + type: object + properties: + version: + type: string + title: >- + version is the new software version tag of the + upgrade + binaries: + type: string + title: >- + binaries is the new stringified json object which + holds binaries in the + + upgrade version for multiple platforms and + architectures + scheduled_at: + type: string + format: uint64 + title: >- + scheduled_at is the unix time the upgrade is + supposed to be done + duration: + type: string + format: uint64 + title: >- + duration is the time in seconds how long the pool + should halt + + during the upgrade to give all validators a chance + of switching + + to the new binaries + title: >- + Upgrade holds all info when a pool has a scheduled + upgrade + current_storage_provider_id: + type: integer + format: int64 + description: storage_provider_id ... + current_compression_id: + type: integer + format: int64 + description: compression_id ... + bundle_proposal: + description: bundle_proposal ... 
+ type: object + properties: + pool_id: + type: string + format: uint64 + title: >- + pool_id is the id of the pool for which this proposal + is for + storage_id: + type: string + title: >- + storage_id is the id with which the data can be + retrieved from + uploader: + type: string + title: >- + uploader is the address of the staker who submitted + the current proposal + next_uploader: + type: string + title: >- + next_uploader is the address of the staker who should + upload the next proposal + data_size: + type: string + format: uint64 + title: data_size the size of the data in bytes + bundle_size: + type: string + format: uint64 + title: >- + bundle_size the size of the bundle (amount of data + items) + to_key: + type: string + title: >- + to_key the key of the last data item in the bundle + proposal + bundle_summary: + type: string + title: >- + bundle_summary a string summary of the current + proposal + data_hash: + type: string + title: data_hash a sha256 hash of the raw compressed data + updated_at: + type: string + format: uint64 + title: updated_at the last time this proposal was edited + voters_valid: + type: array + items: + type: string + title: >- + voters_valid list of all stakers who voted in favor + for current proposal + voters_invalid: + type: array + items: + type: string + title: >- + voters_invalid list of all stakers who voted against + for current proposal + voters_abstain: + type: array + items: + type: string + title: >- + voters_abstain list of all stakers who voted abstain + for current proposal + from_key: + type: string + title: >- + from_key the key of the first data item in the bundle + proposal + storage_provider_id: + type: integer + format: int64 + title: >- + storage_provider_id the id of the storage provider + where the bundle is stored + compression_id: + type: integer + format: int64 + title: >- + compression_id the id of the compression type with + which the data was compressed + title: |- + BundleProposal represents the current bundle proposal + of a storage pool + stakers: + type: array + items: + type: string + description: stakers ... + total_self_delegation: + type: string + format: uint64 + description: total_stake ... + total_delegation: + type: string + format: uint64 + description: total_delegation ... + status: + description: status ... + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + description: >- + QueryPoolResponse is the response type for the Query/Pool RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: id + description: id defines the unique ID of the pool. + in: path + required: true + type: string + format: uint64 + tags: + - QueryPool + /kyve/query/v1beta1/pools: + get: + summary: Pools queries for all pools. + operationId: Pools + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + pools: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id ... + data: + description: data ... + type: object + properties: + id: + type: string + format: uint64 + title: >- + id - unique identifier of the pool, can not be + changed + name: + type: string + title: name is a human readable name for the pool + runtime: + type: string + title: >- + runtime specified which protocol and which version + needs is required + logo: + type: string + title: logo is a link to an image file + config: + type: string + description: >- + config is either a JSON encoded string or a link to + an external storage provider. + + This is up to the implementation of the protocol + node. + start_key: + type: string + description: start_key ... + current_key: + type: string + description: current_key ... + current_summary: + type: string + description: current_summary ... + current_index: + type: string + format: uint64 + description: current_index ... + total_bundles: + type: string + format: uint64 + title: >- + total_bundles is the number of total finalized + bundles + upload_interval: + type: string + format: uint64 + description: upload_interval ... + operating_cost: + type: string + format: uint64 + description: operating_cost ... + min_delegation: + type: string + format: uint64 + description: min_delegation ... + max_bundle_size: + type: string + format: uint64 + description: max_bundle_size ... + disabled: + type: boolean + description: |- + disabled is true when the pool is disabled. + Can only be done via governance. + funders: + type: array + items: + type: object + properties: + address: + type: string + title: address is the address of the funder + amount: + type: string + format: uint64 + title: >- + amount is the current amount of funds in ukyve + the funder has + + still funded the pool with + title: >- + Funder is the object which holds info about a + single pool funder + description: funders ... + total_funds: + type: string + format: uint64 + description: total_funds ... + protocol: + description: protocol ... + type: object + properties: + version: + type: string + title: >- + version holds the current software version tag + of the pool binaries + binaries: + type: string + title: >- + binaries is a stringified json object which + holds binaries in the + + current version for multiple platforms and + architectures + last_upgrade: + type: string + format: uint64 + title: >- + last_upgrade is the unix time the pool was + upgraded the last time + title: >- + Protocol holds all info about the current pool + version and the + + available binaries for participating as a validator + in a pool + upgrade_plan: + description: upgrade_plan ... 
+ type: object + properties: + version: + type: string + title: >- + version is the new software version tag of the + upgrade + binaries: + type: string + title: >- + binaries is the new stringified json object + which holds binaries in the + + upgrade version for multiple platforms and + architectures + scheduled_at: + type: string + format: uint64 + title: >- + scheduled_at is the unix time the upgrade is + supposed to be done + duration: + type: string + format: uint64 + title: >- + duration is the time in seconds how long the + pool should halt + + during the upgrade to give all validators a + chance of switching + + to the new binaries + title: >- + Upgrade holds all info when a pool has a scheduled + upgrade + current_storage_provider_id: + type: integer + format: int64 + description: storage_provider_id ... + current_compression_id: + type: integer + format: int64 + description: compression_id ... + bundle_proposal: + description: bundle_proposal ... + type: object + properties: + pool_id: + type: string + format: uint64 + title: >- + pool_id is the id of the pool for which this + proposal is for + storage_id: + type: string + title: >- + storage_id is the id with which the data can be + retrieved from + uploader: + type: string + title: >- + uploader is the address of the staker who submitted + the current proposal + next_uploader: + type: string + title: >- + next_uploader is the address of the staker who + should upload the next proposal + data_size: + type: string + format: uint64 + title: data_size the size of the data in bytes + bundle_size: + type: string + format: uint64 + title: >- + bundle_size the size of the bundle (amount of data + items) + to_key: + type: string + title: >- + to_key the key of the last data item in the bundle + proposal + bundle_summary: + type: string + title: >- + bundle_summary a string summary of the current + proposal + data_hash: + type: string + title: data_hash a sha256 hash of the raw compressed data + updated_at: + type: string + format: uint64 + title: updated_at the last time this proposal was edited + voters_valid: + type: array + items: + type: string + title: >- + voters_valid list of all stakers who voted in favor + for current proposal + voters_invalid: + type: array + items: + type: string + title: >- + voters_invalid list of all stakers who voted against + for current proposal + voters_abstain: + type: array + items: + type: string + title: >- + voters_abstain list of all stakers who voted abstain + for current proposal + from_key: + type: string + title: >- + from_key the key of the first data item in the + bundle proposal + storage_provider_id: + type: integer + format: int64 + title: >- + storage_provider_id the id of the storage provider + where the bundle is stored + compression_id: + type: integer + format: int64 + title: >- + compression_id the id of the compression type with + which the data was compressed + title: |- + BundleProposal represents the current bundle proposal + of a storage pool + stakers: + type: array + items: + type: string + description: stakers ... + total_self_delegation: + type: string + format: uint64 + description: total_stake ... + total_delegation: + type: string + format: uint64 + description: total_delegation ... + status: + description: status ... + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + description: PoolResponse ... 
+ description: pools ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryPoolsResponse is the response type for the Query/Pools RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + - name: search + description: search ... + in: query + required: false + type: string + - name: runtime + description: runtime ... + in: query + required: false + type: string + - name: disabled + description: disabled ... + in: query + required: false + type: boolean + - name: storage_provider_id + description: storage_provider_id ... + in: query + required: false + type: integer + format: int64 + tags: + - QueryPool + /kyve/query/v1beta1/staker/{address}: + get: + summary: Staker queries for all stakers. + operationId: Staker + responses: + '200': + description: A successful response. + schema: + type: object + properties: + staker: + description: staker ... + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. 
+ type: object + properties: + commission: + type: string + title: |- + commission is the percentage of the rewards that will + get transferred to the staker before the remaining + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: website is a https-link to the website of the staker + logo: + type: string + title: logo is a link to an image file (like jpg or png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker plans + + to change its commission. Delegators will see a + warning in + + the UI. A Commission change takes some time until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: |- + creation_date is the UNIX-timestamp (in seconds) + of when the entry was created. + title: |- + CommissionChangeEntry shows when the old commission + of a staker will change to the new commission + title: StakerMetadata contains static information for a staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is currently + unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked with + the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: pool contains useful information about the pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles get + created + total_funds: + type: string + format: uint64 + title: |- + total_funds of the pool. If the pool runs + out of funds no more bundles will be produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. 
+ After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: |- + is_leaving indicates if a user has scheduled a + a PoolLeave entry. After the leave-time is over + the staker will no longer participate in that pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized to + vote + + and submit bundles. If the server gets compromised + + the staker can just change the valaddress. + balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional funds + to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + title: >- + FullStaker aggregates information from the staker and its + delegators + + as well as pending queue entries into one object. + + It contains almost all needed information for a convenient + usage + description: >- + QueryStakerResponse is the response type for the Query/Staker RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address ... + in: path + required: true + type: string + tags: + - QueryStakers + /kyve/query/v1beta1/stakers: + get: + summary: Stakers queries for all stakers. + operationId: Stakers + responses: + '200': + description: A successful response. + schema: + type: object + properties: + stakers: + type: array + items: + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. + type: object + properties: + commission: + type: string + title: >- + commission is the percentage of the rewards that + will + + get transferred to the staker before the remaining + + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: website is a https-link to the website of the staker + logo: + type: string + title: logo is a link to an image file (like jpg or png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker plans + + to change its commission. Delegators will see a + warning in + + the UI. A Commission change takes some time until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: |- + creation_date is the UNIX-timestamp (in seconds) + of when the entry was created. 
+ title: |- + CommissionChangeEntry shows when the old commission + of a staker will change to the new commission + title: StakerMetadata contains static information for a staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is currently + unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked with + the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: pool contains useful information about the pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles get + created + total_funds: + type: string + format: uint64 + title: |- + total_funds of the pool. If the pool runs + out of funds no more bundles will be produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. + After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: |- + is_leaving indicates if a user has scheduled a + a PoolLeave entry. After the leave-time is over + the staker will no longer participate in that pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized to + vote + + and submit bundles. If the server gets compromised + + the staker can just change the valaddress. + balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional + funds to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + title: >- + FullStaker aggregates information from the staker and its + delegators + + as well as pending queue entries into one object. + + It contains almost all needed information for a convenient + usage + description: stakers ... + pagination: + description: pagination defines the pagination in the response. 
+ type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryStakersResponse is the response type for the Query/Stakers + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + - name: status + description: |- + status looks whether a staker is participating in pools or not. + + - STAKER_STATUS_UNSPECIFIED: STAKER_STATUS_UNSPECIFIED ... + - STAKER_STATUS_ACTIVE: STAKER_STATUS_ACTIVE ... + - STAKER_STATUS_INACTIVE: STAKER_STATUS_INACTIVE ... + in: query + required: false + type: string + enum: + - STAKER_STATUS_UNSPECIFIED + - STAKER_STATUS_ACTIVE + - STAKER_STATUS_INACTIVE + default: STAKER_STATUS_UNSPECIFIED + - name: search + description: search searches for moniker OR address. + in: query + required: false + type: string + tags: + - QueryStakers + /kyve/query/v1beta1/stakers_by_pool/{pool_id}: + get: + summary: >- + StakersByPool queries for all stakers that are currently participating + in the given pool + operationId: StakersByPool + responses: + '200': + description: A successful response. + schema: + type: object + properties: + stakers: + type: array + items: + type: object + properties: + staker: + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. 
+ type: object + properties: + commission: + type: string + title: >- + commission is the percentage of the rewards that + will + + get transferred to the staker before the + remaining + + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: >- + website is a https-link to the website of the + staker + logo: + type: string + title: >- + logo is a link to an image file (like jpg or + png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker + plans + + to change its commission. Delegators will see a + warning in + + the UI. A Commission change takes some time + until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: >- + creation_date is the UNIX-timestamp (in + seconds) + + of when the entry was created. + title: >- + CommissionChangeEntry shows when the old + commission + + of a staker will change to the new commission + title: >- + StakerMetadata contains static information for a + staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is + currently unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the + staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked + with the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: >- + pool contains useful information about the + pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles + get created + total_funds: + type: string + format: uint64 + title: >- + total_funds of the pool. If the pool runs + + out of funds no more bundles will be + produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. 
+ After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: >- + is_leaving indicates if a user has scheduled a + + a PoolLeave entry. After the leave-time is + over + + the staker will no longer participate in that + pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized + to vote + + and submit bundles. If the server gets + compromised + + the staker can just change the valaddress. + balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional + funds to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + title: >- + FullStaker aggregates information from the staker and + its delegators + + as well as pending queue entries into one object. + + It contains almost all needed information for a + convenient usage + description: staker ... + valaccount: + description: valaccount ... + type: object + properties: + pool_id: + type: string + format: uint64 + description: |- + pool_id defines the pool in which the address + is allowed to vote in. + staker: + type: string + description: staker is the address the valaccount is voting for. + valaddress: + type: string + title: |- + valaddress is the account stored on the protocol + node which votes for the staker in the given pool + points: + type: string + format: uint64 + description: |- + When a node is inactive (does not vote at all) + A point is added, after a certain amount of points + is reached the node gets kicked out. + is_leaving: + type: boolean + description: >- + isLeaving indicates if a staker is leaving the given + pool. + description: StakerPoolResponse ... + description: stakers ... + description: >- + QueryStakersByPoolResponse is the response type for the + Query/Staker RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pool_id + description: pool_id ... + in: path + required: true + type: string + format: uint64 + tags: + - QueryStakers + /kyve/query/v1beta1/stakers_by_pool_count: + get: + summary: >- + StakersByPool queries for all stakers and sorted them first by number of + pools participating and + + then by delegation + operationId: StakersByPoolCount + responses: + '200': + description: A successful response. + schema: + type: object + properties: + stakers: + type: array + items: + type: object + properties: + address: + type: string + title: address of the staker + metadata: + description: metadata as logo, moniker, etc. 
+ type: object + properties: + commission: + type: string + title: >- + commission is the percentage of the rewards that + will + + get transferred to the staker before the remaining + + rewards are split across all delegators + moniker: + type: string + title: |- + moniker is a human-readable name for displaying + the staker in the UI + website: + type: string + title: website is a https-link to the website of the staker + logo: + type: string + title: logo is a link to an image file (like jpg or png) + pending_commission_change: + description: >- + pending_commission_change shows if the staker plans + + to change its commission. Delegators will see a + warning in + + the UI. A Commission change takes some time until + + the commission is applied. Users have time to + redelegate + + if they not agree with the new commission. + type: object + properties: + commission: + type: string + title: |- + commission is the new commission that will + become active once the change-time is over + creation_date: + type: string + format: int64 + description: |- + creation_date is the UNIX-timestamp (in seconds) + of when the entry was created. + title: |- + CommissionChangeEntry shows when the old commission + of a staker will change to the new commission + title: StakerMetadata contains static information for a staker + self_delegation: + type: string + format: uint64 + title: amount the staker has delegated to himself + self_delegation_unbonding: + type: string + format: uint64 + description: >- + unbonding_amount is the amount the staker is currently + unbonding + + from the self-delegation. + + This amount can be larger than `amount` when the staker + + got slashed during unbonding. However, at the end of + + the unbonding period this amount is double checked with + the + + remaining amount. + total_delegation: + type: string + format: uint64 + title: |- + total_delegation returns the sum of all $KYVE users + have delegated to this staker + delegator_count: + type: string + format: uint64 + description: |- + delegator_count is the total number of individual + delegator addresses for that user. + pools: + type: array + items: + type: object + properties: + pool: + title: pool contains useful information about the pool + type: object + properties: + id: + type: string + format: uint64 + title: id is the ID of the pool + name: + type: string + title: name of the pool + runtime: + type: string + description: |- + runtime for the protocol nodes + like evm, bitcoin, etc. + logo: + type: string + title: logo of the pool + operating_cost: + type: string + format: uint64 + title: >- + operating_cost is the base payout for each + bundle reward + upload_interval: + type: string + format: uint64 + title: >- + upload_interval is the interval bundles get + created + total_funds: + type: string + format: uint64 + title: |- + total_funds of the pool. If the pool runs + out of funds no more bundles will be produced + total_delegation: + type: string + format: uint64 + title: total_delegation of the pool + status: + description: |- + status of the pool if pool is able + to produce bundles, etc. + type: string + enum: + - POOL_STATUS_UNSPECIFIED + - POOL_STATUS_ACTIVE + - POOL_STATUS_DISABLED + - POOL_STATUS_NO_FUNDS + - POOL_STATUS_NOT_ENOUGH_DELEGATION + - POOL_STATUS_UPGRADING + default: POOL_STATUS_UNSPECIFIED + points: + type: string + format: uint64 + description: |- + points indicates if the staker is inactive + If the staker misses a vote, a point is added. 
+ After 5 points the staker is removed from + the stakers set. + is_leaving: + type: boolean + title: |- + is_leaving indicates if a user has scheduled a + a PoolLeave entry. After the leave-time is over + the staker will no longer participate in that pool + valaddress: + type: string + description: >- + Valaddress is the address which is authorized to + vote + + and submit bundles. If the server gets compromised + + the staker can just change the valaddress. + balance: + type: string + format: uint64 + title: >- + balance is the valaddress account balance and + indicates + + whether or not the valaccount needs additional + funds to + + pay for gas fees + title: |- + PoolMembership shows in which pool the staker + is participating + description: |- + pools is a list of all pools the staker is currently + participating, i.e. allowed to vote and upload data. + title: >- + FullStaker aggregates information from the staker and its + delegators + + as well as pending queue entries into one object. + + It contains almost all needed information for a convenient + usage + description: stakers ... + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: QueryStakersByPoolCountResponse ... + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. 
+ description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - QueryStakers + /kyve/stakers/v1beta1/params: + get: + summary: Parameters queries the parameters of the module. 
+      operationId: StakersParams
+      responses:
+        '200':
+          description: A successful response.
+          schema:
+            type: object
+            properties:
+              params:
+                description: params holds all the parameters of this module.
+                type: object
+                properties:
+                  commission_change_time:
+                    type: string
+                    format: uint64
+                    description: commission_change_time ...
+                  leave_pool_time:
+                    type: string
+                    format: uint64
+                    description: leave_pool_time ...
+            description: >-
+              QueryParamsResponse is the response type for the Query/Params RPC
+              method.
+        default:
+          description: An unexpected error response.
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+              code:
+                type: integer
+                format: int32
+              message:
+                type: string
+              details:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    type_url:
+                      type: string
+                    value:
+                      type: string
+                      format: byte
+      tags:
+        - QueryStakers
+  /kyve/team/v1beta1/team_info:
+    get:
+      summary: TeamInfo queries all important information from the team module
+      operationId: TeamInfo
+      responses:
+        '200':
+          description: A successful response.
+          schema:
+            type: object
+            properties:
+              authority:
+                type: string
+                title: authority is the address of the authority
+              total_team_allocation:
+                type: string
+                format: uint64
+                title: >-
+                  total_team_allocation is the total allocation in $KYVE the
+                  team module has in order to reward team members
+              issued_team_allocation:
+                type: string
+                format: uint64
+                title: >-
+                  issued_team_allocation is the amount in $KYVE tied to team
+                  vesting accounts and therefore no longer available
+              available_team_allocation:
+                type: string
+                format: uint64
+                title: >-
+                  available_team_allocation is the amount in $KYVE with which
+                  further team vesting accounts can be created.
+
+                  If the available amount is zero, no new vesting accounts can
+                  be created
+              total_authority_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  total_authority_rewards is the amount in $KYVE the authority
+                  has earned in total with inflation rewards.
+
+                  Those rewards can be paid out for different purposes
+              claimed_authority_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  claimed_authority_rewards is the amount in $KYVE the
+                  authority has already claimed
+              available_authority_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  available_authority_rewards is the amount of rewards in $KYVE
+                  the authority can currently claim
+              total_account_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  total_account_rewards is the amount in $KYVE all team vesting
+                  accounts have ever received
+              claimed_account_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  claimed_account_rewards is the amount in $KYVE all team
+                  vesting accounts have ever claimed
+              available_account_rewards:
+                type: string
+                format: uint64
+                title: >-
+                  available_account_rewards is the total amount of $KYVE all
+                  team vesting accounts can currently claim
+              required_module_balance:
+                type: string
+                format: uint64
+                title: >-
+                  required_module_balance is the balance the team module should
+                  have. If this is less than the module balance,
+
+                  something went wrong
+              team_module_balance:
+                type: string
+                format: uint64
+                title: team_module_balance is the team module balance in $KYVE
+            description: >-
+              QueryAccountsResponse is the response type for the Query/TeamInfo
+              RPC method.
+        default:
+          description: An unexpected error response.
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+              code:
+                type: integer
+                format: int32
+              message:
+                type: string
+              details:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    type_url:
+                      type: string
+                    value:
+                      type: string
+                      format: byte
+      tags:
+        - QueryTeam
+  /kyve/team/v1beta1/team_vesting_account/{id}:
+    get:
+      summary: TeamVestingAccount queries a team vesting account of the module.
+      operationId: TeamVestingAccount
+      responses:
+        '200':
+          description: A successful response.
+          schema:
+            type: object
+            properties:
+              account:
+                title: account holds the requested team vesting account
+                type: object
+                properties:
+                  id:
+                    type: string
+                    format: uint64
+                    description: >-
+                      id is a unique identifier for each vesting account, tied
+                      to a single team member.
+                  total_allocation:
+                    type: string
+                    format: uint64
+                    description: >-
+                      total_allocation is the number of tokens reserved for
+                      this team member.
+                  commencement:
+                    type: string
+                    format: uint64
+                    title: >-
+                      commencement is the unix timestamp of the member's
+                      official start date in seconds
+                  clawback:
+                    type: string
+                    format: uint64
+                    title: >-
+                      clawback is a unix timestamp of a clawback in seconds. If
+                      timestamp is zero
+
+                      it means that the account has not received a clawback
+                  unlocked_claimed:
+                    type: string
+                    format: uint64
+                    title: >-
+                      unlocked_claimed is the amount of $KYVE already claimed by
+                      the account holder
+                  last_claimed_time:
+                    type: string
+                    format: uint64
+                    title: the last time the unlocked amount was claimed
+                  total_rewards:
+                    type: string
+                    format: uint64
+                    title: >-
+                      total_rewards is the total amount of rewards the account
+                      has ever received
+                  rewards_claimed:
+                    type: string
+                    format: uint64
+                    title: >-
+                      rewards_claimed is the amount of inflation rewards claimed
+                      by the account holder
+                description: TeamVestingAccount ...
+            description: >-
+              QueryTeamVestingAccountResponse is the response type for the
+              Query/TeamVestingAccount RPC method.
+        default:
+          description: An unexpected error response.
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+              code:
+                type: integer
+                format: int32
+              message:
+                type: string
+              details:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    type_url:
+                      type: string
+                    value:
+                      type: string
+                      format: byte
+      parameters:
+        - name: id
+          description: >-
+            id is a unique identifier for each vesting account, tied to a
+            single team member.
+          in: path
+          required: true
+          type: string
+          format: uint64
+      tags:
+        - QueryTeam
+  /kyve/team/v1beta1/team_vesting_accounts:
+    get:
+      summary: TeamVestingAccounts queries all team vesting accounts of the module.
+      operationId: TeamVestingAccounts
+      responses:
+        '200':
+          description: A successful response.
+          schema:
+            type: object
+            properties:
+              accounts:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    id:
+                      type: string
+                      format: uint64
+                      description: >-
+                        id is a unique identifier for each vesting account,
+                        tied to a single team member.
+                    total_allocation:
+                      type: string
+                      format: uint64
+                      description: >-
+                        total_allocation is the number of tokens reserved for
+                        this team member.
+                    commencement:
+                      type: string
+                      format: uint64
+                      title: >-
+                        commencement is the unix timestamp of the member's
+                        official start date in seconds
+                    clawback:
+                      type: string
+                      format: uint64
+                      title: >-
+                        clawback is a unix timestamp of a clawback in seconds.
+ If timestamp is zero + + it means that the account has not received a clawback + unlocked_claimed: + type: string + format: uint64 + title: >- + unlocked_claimed is the amount of $KYVE already claimed + by the account holder + last_claimed_time: + type: string + format: uint64 + title: the last time the unlocked amount was claimed + total_rewards: + type: string + format: uint64 + title: >- + total rewards is the total amount of rewards the account + has received ever + rewards_claimed: + type: string + format: uint64 + title: >- + rewards claimed is the amount inflation rewards claimed + by account holder + description: TeamVestingAccount ... + description: accounts holds all the team vesting accounts of this module. + description: >- + QueryAccountsResponse is response type for the + Query/TeamVestingAccounts RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - QueryTeam + /kyve/team/v1beta1/team_vesting_status/{id}: + get: + summary: >- + TeamCurrentVestingStatus queries the current vesting progress of a team + vesting account + operationId: TeamVestingStatus + responses: + '200': + description: A successful response. + schema: + type: object + properties: + request_date: + type: string + description: request_date .. + plan: + description: plan ... + type: object + properties: + commencement: + type: string + description: commencement ... + token_vesting_start: + type: string + description: token_vesting_start ... + token_vesting_finished: + type: string + description: token_vesting_finished ... + token_unlock_start: + type: string + description: token_unlock_start ... + token_unlock_finished: + type: string + description: token_unlock_finished ... + clawback: + type: string + format: uint64 + description: clawback ... + clawback_amount: + type: string + format: uint64 + description: clawback_amount ... + maximum_vesting_amount: + type: string + format: uint64 + description: maximum_vesting_amount ... + title: >- + QueryVestingPlan is a type holding information about the + account's vesting data which does not change + status: + description: status .. + type: object + properties: + total_vested_amount: + type: string + format: uint64 + description: total_vested_amount ... + total_unlocked_amount: + type: string + format: uint64 + description: total_unlocked_amount ... + current_claimable_amount: + type: string + format: uint64 + description: current_claimable_amount ... + locked_vested_amount: + type: string + format: uint64 + description: locked_vested_amount ... + remaining_unvested_amount: + type: string + format: uint64 + description: remaining_unvested_amount ... + claimed_amount: + type: string + format: uint64 + description: claimed_amount ... + total_rewards: + type: string + format: uint64 + description: total_rewards ... + claimed_rewards: + type: string + format: uint64 + description: claimed_rewards ... + available_rewards: + type: string + format: uint64 + description: available_rewards ... + title: >- + QueryVestingStatus is a type holding information about the + account's vesting progress + description: >- + QueryTeamCurrentVestingStatusResponse is the response type for the + Query/TeamCurrentVestingStatus RPC method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: id + description: >- + id is a unique identify for each vesting account, tied to a single + team member. + in: path + required: true + type: string + format: uint64 + tags: + - QueryTeam + /kyve/team/v1beta1/team_vesting_status_by_time/{id}/{time}: + get: + summary: >- + TeamCurrentVestingStatus queries the current vesting progress of a team + vesting account + operationId: TeamVestingStatusByTime + responses: + '200': + description: A successful response. + schema: + type: object + properties: + request_date: + type: string + description: request_date .. + plan: + description: plan ... + type: object + properties: + commencement: + type: string + description: commencement ... + token_vesting_start: + type: string + description: token_vesting_start ... + token_vesting_finished: + type: string + description: token_vesting_finished ... + token_unlock_start: + type: string + description: token_unlock_start ... + token_unlock_finished: + type: string + description: token_unlock_finished ... + clawback: + type: string + format: uint64 + description: clawback ... + clawback_amount: + type: string + format: uint64 + description: clawback_amount ... + maximum_vesting_amount: + type: string + format: uint64 + description: maximum_vesting_amount ... + title: >- + QueryVestingPlan is a type holding information about the + account's vesting data which does not change + status: + description: status .. + type: object + properties: + total_vested_amount: + type: string + format: uint64 + description: total_vested_amount ... + total_unlocked_amount: + type: string + format: uint64 + description: total_unlocked_amount ... + current_claimable_amount: + type: string + format: uint64 + description: current_claimable_amount ... + locked_vested_amount: + type: string + format: uint64 + description: locked_vested_amount ... + remaining_unvested_amount: + type: string + format: uint64 + description: remaining_unvested_amount ... + claimed_amount: + type: string + format: uint64 + description: claimed_amount ... + total_rewards: + type: string + format: uint64 + description: total_rewards ... + claimed_rewards: + type: string + format: uint64 + description: claimed_rewards ... + available_rewards: + type: string + format: uint64 + description: available_rewards ... + title: >- + QueryVestingStatus is a type holding information about the + account's vesting progress + description: >- + QueryTeamVestingStatusByTimeResponse is the response type for the + Query/TeamCurrentVestingByTimeStatus RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: id + description: >- + id is a unique identify for each vesting account, tied to a single + team member. 
+ in: path + required: true + type: string + format: uint64 + - name: time + description: >- + time is a unix timestamp of the time the vesting progress should be + calculated + in: path + required: true + type: string + format: uint64 + tags: + - QueryTeam diff --git a/go.mod b/go.mod new file mode 100644 index 00000000..504a6c17 --- /dev/null +++ b/go.mod @@ -0,0 +1,307 @@ +module github.com/KYVENetwork/chain + +go 1.19 + +require ( + cosmossdk.io/errors v1.0.0-beta.7 + cosmossdk.io/math v1.0.0-beta.3 + github.com/cosmos/cosmos-proto v1.0.0-alpha8 + github.com/cosmos/cosmos-sdk v0.46.8 + github.com/cosmos/gogoproto v1.4.2 + github.com/cosmos/ibc-go/v5 v5.2.0 + github.com/ethereum/go-ethereum v1.10.17 + github.com/gogo/protobuf v1.3.3 + github.com/golang/protobuf v1.5.2 + github.com/golangci/golangci-lint v1.50.1 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/onsi/ginkgo/v2 v2.1.4 + github.com/onsi/gomega v1.20.0 + github.com/spf13/cast v1.5.0 + github.com/spf13/cobra v1.6.0 + github.com/stretchr/testify v1.8.1 + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint v0.34.24 + github.com/tendermint/tm-db v0.6.7 + google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a + google.golang.org/grpc v1.50.1 + mvdan.cc/gofumpt v0.4.0 +) + +require ( + 4d63.com/gochecknoglobals v0.1.0 // indirect + cloud.google.com/go v0.102.1 // indirect + cloud.google.com/go/compute v1.7.0 // indirect + cloud.google.com/go/iam v0.4.0 // indirect + cloud.google.com/go/storage v1.22.1 // indirect + filippo.io/edwards25519 v1.0.0-rc.1 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/Abirdcfly/dupword v0.0.7 // indirect + github.com/Antonboom/errname v0.1.7 // indirect + github.com/Antonboom/nilnil v0.1.1 // indirect + github.com/BurntSushi/toml v1.2.1 // indirect + github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect + github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/OpenPeeDeeP/depguard v1.1.1 // indirect + github.com/Workiva/go-datastructures v1.0.53 // indirect + github.com/alexkohler/prealloc v1.0.0 // indirect + github.com/alingse/asasalint v0.0.11 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect + github.com/aws/aws-sdk-go v1.40.45 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/blizzy78/varnamelen v0.8.0 // indirect + github.com/bombsimon/wsl/v3 v3.3.0 // indirect + github.com/breml/bidichk v0.2.3 // indirect + github.com/breml/errchkjson v0.3.0 // indirect + github.com/btcsuite/btcd v0.22.2 // indirect + github.com/butuzov/ireturn v0.1.1 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/charithe/durationcheck v0.0.9 // indirect + github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect + 
github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/coinbase/rosetta-sdk-go v0.7.9 // indirect + github.com/confio/ics23/go v0.9.0 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gorocksdb v1.2.0 // indirect + github.com/cosmos/iavl v0.19.4 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.2 // indirect + github.com/creachadair/taskgroup v0.3.2 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect + github.com/daixiang0/gci v0.8.1 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.0 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac // indirect + github.com/dvsekhvalnov/jose2go v1.5.0 // indirect + github.com/esimonov/ifshort v1.0.4 // indirect + github.com/ettle/strcase v0.1.1 // indirect + github.com/fatih/color v1.13.0 // indirect + github.com/fatih/structtag v1.2.0 // indirect + github.com/felixge/httpsnoop v1.0.1 // indirect + github.com/firefart/nonamedreturns v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fzipp/gocyclo v0.6.0 // indirect + github.com/go-critic/go-critic v0.6.5 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-playground/validator/v10 v10.4.1 // indirect + github.com/go-toolsmith/astcast v1.0.0 // indirect + github.com/go-toolsmith/astcopy v1.0.2 // indirect + github.com/go-toolsmith/astequal v1.0.3 // indirect + github.com/go-toolsmith/astfmt v1.0.0 // indirect + github.com/go-toolsmith/astp v1.0.0 // indirect + github.com/go-toolsmith/strparse v1.0.0 // indirect + github.com/go-toolsmith/typep v1.0.2 // indirect + github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gofrs/flock v0.8.1 // indirect + github.com/gogo/gateway v1.1.0 // indirect + github.com/golang/glog v1.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect + github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect + github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect + github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect + github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect + github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect + github.com/golangci/misspell v0.3.5 // indirect + github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect + github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect + github.com/google/btree v1.0.1 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect + 
github.com/googleapis/gax-go/v2 v2.4.0 // indirect + github.com/googleapis/go-type-adapters v1.0.0 // indirect + github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + github.com/gorilla/handlers v1.5.1 // indirect + github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gostaticanalysis/analysisutil v0.7.1 // indirect + github.com/gostaticanalysis/comment v1.4.2 // indirect + github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect + github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.6.1 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect + github.com/hexops/gotextdiff v1.0.3 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/jgautheron/goconst v1.5.1 // indirect + github.com/jingyugao/rowserrcheck v1.1.1 // indirect + github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmhodges/levigo v1.0.0 // indirect + github.com/julz/importas v0.1.0 // indirect + github.com/kisielk/errcheck v1.6.2 // indirect + github.com/kisielk/gotool v1.0.0 // indirect + github.com/kkHAIKE/contextcheck v1.1.3 // indirect + github.com/klauspost/compress v1.15.11 // indirect + github.com/kulti/thelper v0.6.3 // indirect + github.com/kunwardeep/paralleltest v1.0.6 // indirect + github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/ldez/gomoddirectives v0.2.3 // indirect + github.com/ldez/tagliatelle v0.3.1 // indirect + github.com/leonklingele/grouper v1.1.0 // indirect + github.com/lib/pq v1.10.6 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/lufeee/execinquery v1.2.1 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/maratori/testpackage v1.1.0 // indirect + github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-runewidth v0.0.10 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mbilski/exhaustivestruct v1.2.0 // indirect + github.com/mgechev/revive v1.2.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + 
github.com/moricho/tparallel v0.2.1 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/nakabonne/nestif v0.3.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect + github.com/nishanths/exhaustive v0.8.3 // indirect + github.com/nishanths/predeclared v0.2.2 // indirect + github.com/olekukonko/tablewriter v0.0.5 // indirect + github.com/otiai10/copy v1.6.0 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/polyfloyd/go-errorlint v1.0.5 // indirect + github.com/prometheus/client_golang v1.12.2 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.34.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/quasilyte/go-ruleguard v0.3.18 // indirect + github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f // indirect + github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect + github.com/rakyll/statik v0.1.7 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/regen-network/cosmos-proto v0.3.1 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/rs/cors v1.8.2 // indirect + github.com/rs/zerolog v1.27.0 // indirect + github.com/ryancurrah/gomodguard v1.2.4 // indirect + github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect + github.com/securego/gosec/v2 v2.13.1 // indirect + github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect + github.com/sirupsen/logrus v1.9.0 // indirect + github.com/sivchari/containedctx v1.0.2 // indirect + github.com/sivchari/nosnakecase v1.7.0 // indirect + github.com/sivchari/tenv v1.7.0 // indirect + github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.13.0 // indirect + github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect + github.com/stretchr/objx v0.5.0 // indirect + github.com/subosito/gotenv v1.4.1 // indirect + github.com/tdakkota/asciicheck v0.1.1 // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tetafro/godot v1.4.11 // indirect + github.com/tidwall/btree v1.5.0 // indirect + github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect + github.com/timonwong/loggercheck v0.9.3 // indirect + github.com/tomarrell/wrapcheck/v2 v2.7.0 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ulikunitz/xz v0.5.8 // indirect + github.com/ultraware/funlen v0.0.3 // indirect + github.com/ultraware/whitespace v0.0.5 // indirect + github.com/uudashr/gocognit v1.0.6 // indirect + github.com/yagipy/maintidx v1.0.0 // indirect + github.com/yeya24/promlinter v0.2.0 // 
indirect + github.com/zondax/hid v0.9.1 // indirect + github.com/zondax/ledger-go v0.14.1 // indirect + gitlab.com/bosi/decorder v0.2.3 // indirect + go.etcd.io/bbolt v1.3.6 // indirect + go.opencensus.io v0.23.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.1.12 // indirect + go.uber.org/multierr v1.8.0 // indirect + go.uber.org/zap v1.23.0 // indirect + golang.org/x/crypto v0.2.0 // indirect + golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect + golang.org/x/mod v0.6.0 // indirect + golang.org/x/net v0.2.0 // indirect + golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect + golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect + golang.org/x/sys v0.2.0 // indirect + golang.org/x/term v0.2.0 // indirect + golang.org/x/text v0.4.0 // indirect + golang.org/x/tools v0.2.0 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect + google.golang.org/api v0.93.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + honnef.co/go/tools v0.3.3 // indirect + mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect + mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect + mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect + nhooyr.io/websocket v1.8.6 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect +) + +replace ( + // use kyve flavored cosmos/cosmos-sdk + github.com/cosmos/cosmos-sdk => github.com/KYVENetwork/cosmos-sdk v0.46.8-kyve-rc0 + + // use cosmos flavored gogo/protobuf + // https://github.com/cosmos/cosmos-sdk/issues/8469 + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + + // use informal flavored tendermint/tendermint + // https://github.com/tendermint/tendermint/issues/9972 + github.com/tendermint/tendermint => github.com/informalsystems/tendermint v0.34.24 +) diff --git a/go.sum b/go.sum new file mode 100644 index 00000000..dfce0921 --- /dev/null +++ b/go.sum @@ -0,0 +1,1997 @@ +4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod 
h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.4.0 h1:YBYU00SCDzZJdHqVc4I5d6lsklcYIjQZa1YmEz4jlSE= +cloud.google.com/go/iam 
v0.4.0/go.mod h1:cbaZxyScUhxl7ZAkNWiALgihfP75wS/fUsVNaa1r3vA= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1 h1:F6IlQJZrZM++apn9V5/VfS3gbTUYg98PS3EMQAzqtfg= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +cosmossdk.io/errors v1.0.0-beta.7 h1:gypHW76pTQGVnHKo6QBkb4yFOJjC+sUGRc5Al3Odj1w= +cosmossdk.io/errors v1.0.0-beta.7/go.mod h1:mz6FQMJRku4bY7aqS/Gwfcmr/ue91roMEKAmDUDpBfE= +cosmossdk.io/math v1.0.0-beta.3 h1:TbZxSopz2LqjJ7aXYfn7nJSb8vNaBklW6BLpcei1qwM= +cosmossdk.io/math v1.0.0-beta.3/go.mod h1:3LYasri3Zna4XpbrTNdKsWmD5fHHkaNAod/mNT9XdE4= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0-rc.1 h1:m0VOOB23frXZvAOK44usCgLWvtsxIoMCTBGJZlpmGfU= +filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= +git.sr.ht/~sircmpwn/getopt v0.0.0-20191230200459-23622cc906b3/go.mod h1:wMEGFFFNuPos7vHmWXfszqImLppbc0wEhh6JBfJIUgw= +git.sr.ht/~sircmpwn/go-bare v0.0.0-20210406120253-ab86bc2846d9/go.mod h1:BVJwbDfVjCjoFiKrhkei6NdGcZYpkDkdyCdg1ukytRA= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q= +github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk= +github.com/Antonboom/errname v0.1.7 h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= +github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= +github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= +github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= +github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM= +github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/KYVENetwork/cosmos-sdk v0.46.8-kyve-rc0 h1:CbqKfDADxi0fYJ3kV/aIrYlpV+h96VJnW90uSsbtydQ= +github.com/KYVENetwork/cosmos-sdk v0.46.8-kyve-rc0/go.mod h1:lg+FqwndbbCYQk1YTUWRDpOsNbQG0nINQqxY7ZnsAP8= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.1.1 h1:TSUznLjvp/4IUP+OQ0t/4jF4QUyxIcVX8YnghZdunyA= +github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= +github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= +github.com/Zilliqa/gozilliqa-sdk v1.2.1-0.20201201074141-dd0ecada1be6/go.mod h1:eSYp2T6f0apnuW8TzhV3f6Aff2SE8Dwio++U4ha4yEM= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod 
h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= +github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= +github.com/aws/aws-sdk-go-v2/config 
v1.1.1/go.mod h1:0XsVy9lBI/BCXm+2Tuvt39YmdHwS5unDQmxZOYe8F5Y= +github.com/aws/aws-sdk-go-v2/credentials v1.1.1/go.mod h1:mM2iIjwl7LULWtS6JCACyInboHirisUUdkBPoTHMOUo= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.2/go.mod h1:3hGg3PpiEjHnrkrlasTfxFqUsZ2GCk/fMUn4CbKgSkM= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= +github.com/aws/aws-sdk-go-v2/service/route53 v1.1.1/go.mod h1:rLiOUrPLW/Er5kRcQ7NkwbjlijluLsrIbu/iyl35RO4= +github.com/aws/aws-sdk-go-v2/service/sso v1.1.1/go.mod h1:SuZJxklHxLAXgLTc1iFXbEWkXs7QRTQpCLGaKIprQW0= +github.com/aws/aws-sdk-go-v2/service/sts v1.1.1/go.mod h1:Wi0EBZwiz/K44YliU0EKxqTCJGUfYTWXrrBwkq736bM= +github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= +github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI= +github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A= +github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw= +github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= +github.com/btcsuite/btcd v0.0.0-20190315201642-aa6e0f35703c/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.21.0-beta.0.20201114000516-e9c7a5ac6401/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd v0.22.2 h1:vBZ+lGGd1XubpOWO67ITJpAEsICWhA0YzqkcpkgNBfo= +github.com/btcsuite/btcd v0.22.2/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/btcec/v2 v2.1.2/go.mod 
h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ= +github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= +github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= 
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/chavacava/garif v0.0.0-20220630083739-93517212f375 h1:E7LT642ysztPWE0dfz43cWOvMiF42DyTRC+eZIaO4yI= +github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coinbase/kryptology v1.8.0/go.mod h1:RYXOAPdzOGUe3qlSFkMGn58i3xUA8hmxYHksuq+8ciI= +github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA= +github.com/coinbase/rosetta-sdk-go v0.7.9/go.mod h1:0/knutI7XGVqXmmH4OQD8OckFrbQ8yMsUZTG7FXCR2M= +github.com/confio/ics23/go v0.9.0 h1:cWs+wdbS2KRPZezoaaj+qBleXgUk5WOQFMP3CQFGTr4= +github.com/confio/ics23/go v0.9.0/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod 
h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/bavard v0.1.8-0.20210915155054-088da2f7f54a/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= +github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1:815PAHg3wvysy0SyIqanF8gZ0Y1wjk/hrDHD/iT88+Q= +github.com/consensys/gnark-crypto v0.5.3/go.mod h1:hOdPlWQV1gDLp7faZVeg8Y0iEPFaOUnCc4XeCCk96p0= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-alpha8 h1:d3pCRuMYYvGA5bM0ZbbjKn+AoQD4A7dyNG2wzwWalUw= +github.com/cosmos/cosmos-proto v1.0.0-alpha8/go.mod h1:6/p+Bc4O8JKeZqe0VqUGTX31eoYqemTT4C1hLCWsO7I= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogoproto v1.4.2 h1:UeGRcmFW41l0G0MiefWhkPEVEwvu78SZsHBvI78dAYw= +github.com/cosmos/gogoproto v1.4.2/go.mod h1:cLxOsn1ljAHSV527CHOtaIP91kK6cCrZETRBrkzItWU= +github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= +github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= +github.com/cosmos/iavl v0.19.4 h1:t82sN+Y0WeqxDLJRSpNd8YFX5URIrT+p8n6oJbJ2Dok= +github.com/cosmos/iavl v0.19.4/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= +github.com/cosmos/ibc-go/v5 v5.2.0 h1:LxwttRQqdUJpQ3/Gc3XPg5lkRo3pcbzx65dxFIY6ONE= +github.com/cosmos/ibc-go/v5 v5.2.0/go.mod h1:MhDUMDVSboK5JW2pEWHNcw0wJHaHqKV/vwwP7awGhzI= +github.com/cosmos/ledger-cosmos-go v0.12.2 h1:/XYaBlE2BJxtvpkHiBm97gFGSGmYGKunKyF3nNqAXZA= +github.com/cosmos/ledger-cosmos-go v0.12.2/go.mod h1:ZcqYgnfNJ6lAXe4HPtWgarNEY+B74i+2/8MhZw4ziiI= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cristalhq/acmd v0.8.1/go.mod h1:LG5oa43pE/BbxtfMoImHCQN++0Su7dzipdgBjMCBVDQ= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= 
+github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/daixiang0/gci v0.8.1 h1:T4xpSC+hmsi4CSyuYfIJdMZAr9o7xZmHpQVygMghGZ4= +github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= +github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= 
+github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac h1:opbrjaN/L8gg6Xh5D04Tem+8xVcz6ajZlGCs49mQgyg= +github.com/dustin/go-humanize v1.0.1-0.20200219035652-afde56e7acac/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= +github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= +github.com/ethereum/go-ethereum v1.10.17 h1:XEcumY+qSr1cZQaWsQs5Kck3FHB0V2RiMHPdTBJ+oT8= +github.com/ethereum/go-ethereum v1.10.17/go.mod h1:Lt5WzjM07XlXc95YzrhosmR4J9Ahd6X2wyEV2SvGhk0= +github.com/ettle/strcase v0.1.1 
h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-critic/go-critic v0.6.5 
h1:fDaR/5GWURljXwF8Eh31T2GZNz9X4jeboS912mWF8Uo= +github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.2 h1:YnWf5Rnh1hUudj11kei53kI57quN/VH6Hp1n+erozn0= +github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= 
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.2/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.0.3 h1:+LVdyRatFS+XO78SGV4I3TCEA0AC7fKEGma+fH+674o= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o= +github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0= +github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.50.1 h1:C829clMcZXEORakZlwpk7M4iDw2XiwxxKaG504SZ9zY= +github.com/golangci/golangci-lint v1.50.1/go.mod h1:AQjHBopYS//oB8xs0y0M/dtxdKHkdhl0RvmjUct0/4w= +github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0 h1:6eUflI3DiGusXGK6X7cCcIgVCpZ2CiZ1Q7jl6ZxNV70= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3Uqrmrcpk= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod 
h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.6.1 h1:NASsgP4q6tL94WH6nJxKWj8As2H/2kop/bB1d8JMyRY= +github.com/hashicorp/go-getter v1.6.1/go.mod h1:IZCrswsZPeWv9IkVnLElzRU/gz/QPi6pZHn4tv6vbwA= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru 
v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 h1:aSVUgRRRtOrZOC1fYmY9gV0e9z/Iu+xNVSASWjsuyGU= +github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3/go.mod h1:5PC6ZNPde8bBqU/ewGZig35+UIZtw9Ytxez8/q5ZyFE= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/huin/goupnp v1.0.3-0.20220313090229-ca81a64b4204/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= +github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxdb-client-go/v2 v2.4.0/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/influxdata/promql/v2 
v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/informalsystems/tendermint v0.34.24 h1:2beNEg5tp+U5oj/Md+0xDBsMHGbdue31T3OrstS6xS0= +github.com/informalsystems/tendermint v0.34.24/go.mod h1:rXVrl4OYzmIa1I91av3iLv2HS0fGSiucyW9J4aMTpKI= +github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b h1:izTof8BKh/nE1wrKOrloNA5q4odOarjf+Xpe+4qow98= +github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= +github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go 
v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= +github.com/karalabe/usb v0.0.2/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c= +github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR0uvmw= +github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= +github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= +github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM= +github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= +github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= +github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lucasjones/reggen v0.0.0-20180717132126-cdb49ff09d77/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4= +github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= +github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= +github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod 
h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/revive v1.2.4 h1:+2Hd/S8oO2H0Ikq2+egtNwQsVhAeELHjxjIUFX5ajLI= +github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover 
v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/neilotoole/errgroup v0.1.6/go.mod h1:Q2nLGf+594h0CLBs/Mbg6qOr7GtqDK7C2S41udRnToE= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.8.3 h1:pw5O09vwg8ZaditDp/nQRqVnrMczSJDxRDJMowvhsrM= +github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= +github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod 
h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.20.0 h1:8W0cWlwFkflGPLltQvLRB7ZVD5HuP6ng320w2IS245Q= +github.com/onsi/gomega v1.20.0/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.6.0 h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ= +github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= +github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v1.0.5 h1:AHB5JRCjlmelh9RrLxT9sgzpalIwwq4hqE8EkwIwKdY= +github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang 
v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= 
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.18 h1:sd+abO1PEI9fkYennwzHn9kl3nqP6M5vE7FiOzZ+5CE= +github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= +github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.21/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f h1:6Gtn2i04RD0gVyYf2/IUMTIs+qYleBt4zxDqkLTcu4U= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg= +github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors 
v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= +github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= +github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= +github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.20.0 h1:K6CXjqqtSYSsuyRDDC7Sjn6vTMLiSJa4ZmDkiokoqtw= +github.com/sashamelentyev/usestdlibvars v1.20.0/go.mod h1:0GaP+ecfZMXShS0A94CJn6aEuPRILv8h/VuWI9n1ygg= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec/v2 v2.13.1 h1:7mU32qn2dyC81MH9L2kefnQyRMUarfDER3iQyMHcjYM= +github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo= +github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= +github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE= +github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.6.0 h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= +github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.13.0 
h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= +github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= +github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= +github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= +github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= 
+github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= +github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= +github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timonwong/loggercheck v0.9.3 h1:ecACo9fNiHxX4/Bc02rW2+kaJIAMAes7qJ7JKxt0EZI= +github.com/timonwong/loggercheck v0.9.3/go.mod h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= +github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= +github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.7.0 h1:J/F8DbSKJC83bAvC6FoZaRjZiZ/iKoueSdrEkmGeacA= +github.com/tomarrell/wrapcheck/v2 v2.7.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/tyler-smith/go-bip39 v1.0.2/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= +github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= 
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/ybbus/jsonrpc v2.1.2+incompatible/go.mod h1:XJrh1eMSzdIYFbM08flv0wp5G35eRniyeGut1z+LSiE= +github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= +github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= +github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= +gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= +gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= +go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= +golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= +golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= 
+golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220517195934-5e4e11fc645e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod 
h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod 
h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.93.0 h1:T2xt9gi0gHdxdnRkVQhT8mIvPaXKNsDNWz+L696M66M= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a h1:GH6UPn3ixhWcKDhpnEC55S75cerLPdpp3hrhfKYjZgw= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod 
h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.2-0.20220831092852-f930b1dc76e8 h1:KR8+MyP7/qOlV+8Af01LtjL04bu7on42eVsxT4EyBQk= +google.golang.org/protobuf 
v1.28.2-0.20220831092852-f930b1dc76e8/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= +gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools 
v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= +honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= +mvdan.cc/gofumpt v0.4.0 h1:JVf4NN1mIpHogBj7ABpgOyZc65/UUOkKQFkoURsz4MM= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v0.5.3 h1:163N50IHFqr1phZens4FQOdPgfJscR7a562mjQqeo4M= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/proto/Dockerfile b/proto/Dockerfile new file mode 100644 index 00000000..bef5d2da --- /dev/null +++ b/proto/Dockerfile @@ -0,0 +1,26 @@ +FROM bufbuild/buf:latest as BUILDER +FROM golang:1.19-alpine + +RUN apk add --no-cache \ + nodejs \ + npm \ + git \ + make + +RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@latest +RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@latest + +RUN git clone https://github.com/regen-network/protobuf.git; \ + cd protobuf; \ + go mod download; \ + make install; \ + cd .. + +RUN git clone https://github.com/regen-network/cosmos-proto.git; \ + cd cosmos-proto/protoc-gen-gocosmos; \ + go install .; \ + cd .. 
+ +RUN npm install -g swagger-combine + +COPY --from=BUILDER /usr/local/bin /usr/local/bin diff --git a/proto/buf.gen.yaml b/proto/buf.gen.yaml new file mode 100644 index 00000000..90dcc2a7 --- /dev/null +++ b/proto/buf.gen.yaml @@ -0,0 +1,12 @@ +version: v1 +plugins: + - name: gocosmos + out: .. + opt: + - plugins=grpc + - name: grpc-gateway + out: .. + - name: swagger + out: ../tmp-swagger-gen + opt: + - simple_operation_ids=true diff --git a/proto/buf.lock b/proto/buf.lock new file mode 100644 index 00000000..ecb58968 --- /dev/null +++ b/proto/buf.lock @@ -0,0 +1,19 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + commit: 1935555c206d4afb9e94615dfd0fad31 + - remote: buf.build + owner: cosmos + repository: cosmos-sdk + commit: 8cb30a2c4de74dc9bd8d260b1e75e176 + - remote: buf.build + owner: cosmos + repository: gogo-proto + commit: 34d970b699f84aa382f3c29773a60836 + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 783e4b5374fa488ab068d08af9658438 diff --git a/proto/buf.yaml b/proto/buf.yaml new file mode 100644 index 00000000..24561cc0 --- /dev/null +++ b/proto/buf.yaml @@ -0,0 +1,18 @@ +version: v1 +name: buf.build/kyve/chain +deps: + # Cosmos SDK v0.46 (https://github.com/cosmos/cosmos-sdk/tree/main/proto#sdk-x-buf) + - buf.build/cosmos/cosmos-sdk:8cb30a2c4de74dc9bd8d260b1e75e176 + - buf.build/cosmos/gogo-proto + - buf.build/googleapis/googleapis +breaking: + use: + - FILE +lint: + use: + - DEFAULT + - COMMENTS + except: + - SERVICE_SUFFIX + - RPC_REQUEST_STANDARD_NAME + - RPC_RESPONSE_STANDARD_NAME diff --git a/proto/generate.sh b/proto/generate.sh new file mode 100644 index 00000000..27a7fa57 --- /dev/null +++ b/proto/generate.sh @@ -0,0 +1,9 @@ +cd proto +buf generate +cd .. + +cp -r github.com/KYVENetwork/chain/* ./ +rm -rf github.com + +swagger-combine ./docs/config.json -o ./docs/swagger.yml +rm -rf tmp-swagger-gen diff --git a/proto/kyve/bundles/v1beta1/bundles.proto b/proto/kyve/bundles/v1beta1/bundles.proto new file mode 100644 index 00000000..20ed06f1 --- /dev/null +++ b/proto/kyve/bundles/v1beta1/bundles.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// BundleStatus represents the status of an evaluated bundle +// proposal. +enum BundleStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // BUNDLE_STATUS_UNSPECIFIED ... + BUNDLE_STATUS_UNSPECIFIED = 0; + // BUNDLE_STATUS_VALID ... + BUNDLE_STATUS_VALID = 1; + // BUNDLE_STATUS_INVALID ... + BUNDLE_STATUS_INVALID = 2; + // BUNDLE_STATUS_NO_FUNDS ... + BUNDLE_STATUS_NO_FUNDS = 3; + // BUNDLE_STATUS_NO_QUORUM ... + BUNDLE_STATUS_NO_QUORUM = 4; + // BUNDLE_STATUS_DISABLED ... 
+ BUNDLE_STATUS_DISABLED = 5; +} + +// BundleProposal represents the current bundle proposal +// of a storage pool +message BundleProposal { + // pool_id is the id of the pool for which this proposal is for + uint64 pool_id = 1; + // storage_id is the id with which the data can be retrieved from + string storage_id = 2; + // uploader is the address of the staker who submitted the current proposal + string uploader = 3; + // next_uploader is the address of the staker who should upload the next proposal + string next_uploader = 4; + // data_size the size of the data in bytes + uint64 data_size = 5; + // bundle_size the size of the bundle (amount of data items) + uint64 bundle_size = 6; + // to_key the key of the last data item in the bundle proposal + string to_key = 7; + // bundle_summary a string summary of the current proposal + string bundle_summary = 8; + // data_hash a sha256 hash of the raw compressed data + string data_hash = 9; + // updated_at the last time this proposal was edited + uint64 updated_at = 10; + // voters_valid list of all stakers who voted in favor for current proposal + repeated string voters_valid = 11; + // voters_invalid list of all stakers who voted against for current proposal + repeated string voters_invalid = 12; + // voters_abstain list of all stakers who voted abstain for current proposal + repeated string voters_abstain = 13; + // from_key the key of the first data item in the bundle proposal + string from_key = 14; + // storage_provider_id the id of the storage provider where the bundle is stored + uint32 storage_provider_id = 15; + // compression_id the id of the compression type with which the data was compressed + uint32 compression_id = 16; +} + +// FinalizedBundle represents a bundle proposal where the majority +// agreed on its validity +message FinalizedBundle { + // pool_id is the id of the pool for which this proposal is for + uint64 pool_id = 1; + // id is a unique identifier for each finalized bundle in a pool + uint64 id = 2; + // storage_id is the id with which the data can be retrieved from + string storage_id = 3; + // uploader is the address of the staker who submitted this bundle + string uploader = 4; + // from_index is the index from where the bundle starts (inclusive) + uint64 from_index = 5; + // to_index is the index to which the bundle goes (exclusive) + uint64 to_index = 6; + // to_key the key of the last data item in the bundle proposal + string to_key = 7; + // bundle_summary a string summary of the current proposal + string bundle_summary = 8; + // data_hash a sha256 hash of the raw compressed data + string data_hash = 9; + // finalized_at is the block height at which this bundle got finalized + uint64 finalized_at = 10; + // from_key the key of the first data item in the bundle proposal + string from_key = 11; + // storage_provider_id the id of the storage provider where the bundle is stored + uint32 storage_provider_id = 12; + // compression_id the id of the compression type with which the data was compressed + uint32 compression_id = 13; +} diff --git a/proto/kyve/bundles/v1beta1/events.proto b/proto/kyve/bundles/v1beta1/events.proto new file mode 100644 index 00000000..6bc7b503 --- /dev/null +++ b/proto/kyve/bundles/v1beta1/events.proto @@ -0,0 +1,136 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +import "kyve/bundles/v1beta1/bundles.proto"; +import "kyve/bundles/v1beta1/tx.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// EventBundleVote is an event emitted when a protocol node 
votes on a bundle. +// emitted_by: MsgVoteBundleProposal +message EventBundleVote { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the account staker of the protocol node. + string staker = 2; + // storage_id is the unique ID of the bundle. + string storage_id = 3; + // vote is for what the validator voted with + VoteType vote = 4; +} + +// EventBundleProposed is submitted by the MsgSubmitBundleProposal message +// emitted_by: MsgSubmitBundleProposal +message EventBundleProposed { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // internal id for the KYVE-bundle + uint64 id = 2; + // storage_id is the ID to retrieve to data item from the configured storage provider + // e.g. the ARWEAVE-id + string storage_id = 3; + // Address of the uploader/proposer of the bundle + string uploader = 4; + // data_size size in bytes of the data + uint64 data_size = 5; + // from_index starting index of the bundle (inclusive) + uint64 from_index = 6; + // bundle_size amount of data items in the bundle + uint64 bundle_size = 7; + // from_key the key of the first data item in the bundle + string from_key = 8; + // to_key the key of the last data item in the bundle + string to_key = 9; + // bundle_summary is a short string holding some useful information of + // the bundle which will get stored on-chain + string bundle_summary = 10; + // data_hash is a sha256 hash of the raw compressed data + string data_hash = 11; + // proposed_at the unix time when the bundle was proposed + uint64 proposed_at = 12; + // storage_provider_id the unique id of the storage provider where + // the data of the bundle is tored + uint32 storage_provider_id = 13; + // compression_id the unique id of the compression type the data + // of the bundle was compressed with + uint32 compression_id = 14; +} + +// EventBundleFinalized is an event emitted when a bundle is finalised. +// emitted_by: MsgSubmitBundleProposal, EndBlock +message EventBundleFinalized { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // internal id for the KYVE-bundle + uint64 id = 2; + // total voting power which voted for valid + uint64 valid = 3; + // total voting power which voted for invalid + uint64 invalid = 4; + // total voting power which voted for abstain + uint64 abstain = 5; + // total voting power of the pool + uint64 total = 6; + // status of the finalized bundle + BundleStatus status = 7; + // rewards transferred to treasury (in ukyve) + uint64 reward_treasury = 8; + // rewardUploader rewards directly transferred to uploader (in ukyve) + uint64 reward_uploader = 9; + // rewardDelegation rewards distributed among all delegators (in ukyve) + uint64 reward_delegation = 10; + // rewardTotal the total bundle reward + uint64 reward_total = 11; + // finalized_at the block height where the bundle got finalized + uint64 finalized_at = 12; + // uploader the address of the uploader of this bundle + string uploader = 13; + // next_uploader the address of the next uploader after this bundle + string next_uploader = 14; +} + +// EventClaimedUploaderRole is an event emitted when an uploader claims the uploader role +// emitted_by: MsgClaimUploaderRole +message EventClaimedUploaderRole { + // pool_id is the unique ID of the pool. 
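EventBundleFinalized above carries the tallied voting power (valid, invalid, abstain, total) together with the resulting BundleStatus. The actual decision logic lives in x/bundles/keeper/logic_bundles.go, which is elsewhere in this patch; the self-contained Go sketch below is only one plausible reading of how such a tally could map onto the BundleStatus values from bundles.proto. The >50% participation threshold and the tie handling are assumptions, not the chain's authoritative rules, and the NO_FUNDS / DISABLED statuses are omitted because they depend on pool state rather than the vote.

package main

import "fmt"

// BundleStatus mirrors the enum in kyve/bundles/v1beta1/bundles.proto.
type BundleStatus int

const (
	BundleStatusUnspecified BundleStatus = iota
	BundleStatusValid
	BundleStatusInvalid
	BundleStatusNoFunds
	BundleStatusNoQuorum
	BundleStatusDisabled
)

// tally mirrors the voting-power fields of EventBundleFinalized.
type tally struct {
	valid, invalid, abstain, total uint64
}

// evaluate maps a tally to a status. The ">50% of total voting power must
// have voted" quorum rule used here is an illustrative assumption; the real
// rules are implemented in the x/bundles keeper.
func evaluate(t tally) BundleStatus {
	voted := t.valid + t.invalid + t.abstain
	if voted*2 <= t.total { // not enough voting power participated
		return BundleStatusNoQuorum
	}
	if t.valid > t.invalid {
		return BundleStatusValid
	}
	return BundleStatusInvalid
}

func main() {
	fmt.Println(evaluate(tally{valid: 60, invalid: 20, abstain: 5, total: 100})) // prints 1 (valid)
}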
+ uint64 pool_id = 1; + // id internal id for the KYVE-bundle + uint64 id = 2; + // new_uploader the address of the participant who claimed + // the free uploader role + string new_uploader = 3; +} + +// EventSkippedUploaderRole is an event emitted when an uploader skips the upload +// emitted_by: MsgSkipUploaderRole +message EventSkippedUploaderRole { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // id internal id for the KYVE-bundle + uint64 id = 2; + // previous_uploader is the address of the staker who skipped his uploader role + string previous_uploader = 3; + // new_uploader is the address of the new uploader who got automatically selected + string new_uploader = 4; +} + +// EventPointIncreased is an event emitted when a staker receives a point +// emitted_by: MsgSubmitBundleProposal, EndBlock +message EventPointIncreased { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the address of the staker who received the point + string staker = 2; + // current_points is the amount of points the staker has now + uint64 current_points = 3; +} + +// EventPointIncreased is an event emitted when a staker receives a point +// emitted_by: MsgSubmitBundleProposal, EndBlock +message EventPointsReset { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the address of the staker who has zero points now + string staker = 2; +} diff --git a/proto/kyve/bundles/v1beta1/genesis.proto b/proto/kyve/bundles/v1beta1/genesis.proto new file mode 100644 index 00000000..620274cb --- /dev/null +++ b/proto/kyve/bundles/v1beta1/genesis.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/bundles/v1beta1/bundles.proto"; +import "kyve/bundles/v1beta1/params.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// GenesisState defines the bundles module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false]; + // bundle_proposal_list ... + repeated BundleProposal bundle_proposal_list = 2 [(gogoproto.nullable) = false]; + // finalized_bundle_list ... + repeated FinalizedBundle finalized_bundle_list = 3 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/bundles/v1beta1/params.proto b/proto/kyve/bundles/v1beta1/params.proto new file mode 100644 index 00000000..593b1e96 --- /dev/null +++ b/proto/kyve/bundles/v1beta1/params.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// Params defines the bundles module parameters. +message Params { + // upload_timeout ... + uint64 upload_timeout = 1; + // storage_cost ... + uint64 storage_cost = 2; + // network_fee ... + string network_fee = 3; + // max_points ... + uint64 max_points = 4; +} diff --git a/proto/kyve/bundles/v1beta1/query.proto b/proto/kyve/bundles/v1beta1/query.proto new file mode 100644 index 00000000..ba64d1e0 --- /dev/null +++ b/proto/kyve/bundles/v1beta1/query.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/bundles/v1beta1/params.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// Query defines the gRPC querier service. +service Query { + // Parameters queries the parameters of the module. 
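The max_points parameter in params.proto, together with EventPointIncreased and EventPointsReset above, suggests a simple bookkeeping pattern: stakers collect points for missed duties and the counter is reset once they participate again or the threshold is hit. What exactly happens when max_points is reached (a timeout slash via x/delegation) is handled elsewhere in this patch; the Go sketch below is only a hedged illustration of the counter itself, with the slash left as a boolean signal to the caller. The in-memory map and field names are assumptions, not the module's store layout.

package main

import "fmt"

// pointTracker is an illustrative, in-memory stand-in for the staker points
// kept by x/bundles; it is not the module's actual store.
type pointTracker struct {
	maxPoints uint64            // corresponds to the max_points param
	points    map[string]uint64 // staker address -> current points
}

// addPoint increments a staker's counter and reports whether the max_points
// threshold was reached, in which case the counter is reset (EventPointsReset)
// and the caller would trigger the timeout slash.
func (p *pointTracker) addPoint(staker string) bool {
	p.points[staker]++
	if p.points[staker] >= p.maxPoints {
		p.points[staker] = 0
		return true
	}
	return false // EventPointIncreased
}

func main() {
	t := &pointTracker{maxPoints: 3, points: map[string]uint64{}}
	for i := 0; i < 3; i++ {
		fmt.Println(t.addPoint("kyve1staker...")) // false, false, true
	}
}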
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/kyve/bundles/v1beta1/params"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. + Params params = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/bundles/v1beta1/tx.proto b/proto/kyve/bundles/v1beta1/tx.proto new file mode 100644 index 00000000..4b237ca5 --- /dev/null +++ b/proto/kyve/bundles/v1beta1/tx.proto @@ -0,0 +1,124 @@ +syntax = "proto3"; + +package kyve.bundles.v1beta1; + +import "cosmos_proto/cosmos.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/bundles/types"; + +// Msg defines the Msg service. +service Msg { + // SubmitBundleProposal ... + rpc SubmitBundleProposal(MsgSubmitBundleProposal) returns (MsgSubmitBundleProposalResponse); + // VoteBundleProposal ... + rpc VoteBundleProposal(MsgVoteBundleProposal) returns (MsgVoteBundleProposalResponse); + // ClaimUploaderRole ... + rpc ClaimUploaderRole(MsgClaimUploaderRole) returns (MsgClaimUploaderRoleResponse); + // SkipUploaderRole ... + rpc SkipUploaderRole(MsgSkipUploaderRole) returns (MsgSkipUploaderRoleResponse); + + // UpdateParams defines a governance operation for updating the x/bundles module + // parameters. The authority is hard-coded to the x/gov module account. + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} + +// MsgSubmitBundleProposal defines a SDK message for submitting a bundle proposal. +message MsgSubmitBundleProposal { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // pool_id ... + uint64 pool_id = 3; + // storage_id ... + string storage_id = 4; + // data_size ... + uint64 data_size = 5; + // data_hash ... + string data_hash = 6; + // from_index ... + uint64 from_index = 7; + // bundle_size ... + uint64 bundle_size = 8; + // from_key + string from_key = 9; + // to_key ... + string to_key = 10; + // bundle_summary ... + string bundle_summary = 11; +} + +// MsgSubmitBundleProposalResponse defines the Msg/SubmitBundleProposal response type. +message MsgSubmitBundleProposalResponse {} + +// VoteType ... +enum VoteType { + option (gogoproto.goproto_enum_prefix) = false; + + // VOTE_TYPE_UNSPECIFIED ... + VOTE_TYPE_UNSPECIFIED = 0; + // VOTE_TYPE_VALID ... + VOTE_TYPE_VALID = 1; + // VOTE_TYPE_INVALID ... + VOTE_TYPE_INVALID = 2; + // VOTE_TYPE_ABSTAIN ... + VOTE_TYPE_ABSTAIN = 3; +} + +// MsgVoteBundleProposal defines a SDK message for voting on a bundle proposal. +message MsgVoteBundleProposal { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // id ... + uint64 pool_id = 3; + // storage_id ... + string storage_id = 4; + // vote ... + VoteType vote = 5; +} + +// MsgVoteBundleProposalResponse defines the Msg/VoteBundleProposal response type. +message MsgVoteBundleProposalResponse {} + +// MsgClaimUploaderRole defines a SDK message for claiming the uploader role. +message MsgClaimUploaderRole { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // id ... + uint64 pool_id = 3; +} + +// MsgClaimUploaderRoleResponse defines the Msg/ClaimUploaderRole response type. +message MsgClaimUploaderRoleResponse {} + +// MsgSubmitBundleProposal defines a SDK message for submitting a bundle proposal. 
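tx.proto above defines the messages a protocol node sends during the bundle lifecycle. In practice a node would use the generated Go types in x/bundles/types (per the go_package option) and broadcast them through a Cosmos SDK transaction client; to stay self-contained, the snippet below sketches only the message construction with locally declared stand-in types. The field names follow the proto definitions one-to-one, while the address and storage id values are placeholders.

package main

import "fmt"

// VoteType mirrors the enum in kyve/bundles/v1beta1/tx.proto.
type VoteType int32

const (
	VoteTypeUnspecified VoteType = 0
	VoteTypeValid       VoteType = 1
	VoteTypeInvalid     VoteType = 2
	VoteTypeAbstain     VoteType = 3
)

// MsgVoteBundleProposal is a stand-in for the generated type in
// x/bundles/types; the fields map 1:1 to the proto message above.
type MsgVoteBundleProposal struct {
	Creator   string
	Staker    string
	PoolId    uint64
	StorageId string
	Vote      VoteType
}

func main() {
	// A protocol node votes "valid" on the current proposal of pool 0.
	msg := MsgVoteBundleProposal{
		Creator:   "kyve1valaccount...", // placeholder address
		Staker:    "kyve1staker...",     // placeholder address
		PoolId:    0,
		StorageId: "arweave-tx-id", // placeholder storage id
		Vote:      VoteTypeValid,
	}
	fmt.Printf("%+v\n", msg)
	// In practice this message is wrapped in a transaction, signed and
	// broadcast via the node's RPC endpoint.
}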
+message MsgSkipUploaderRole { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // pool_id ... + uint64 pool_id = 3; + // from_index ... + uint64 from_index = 4; +} + +// MsgSubmitBundleProposalResponse defines the Msg/SubmitBundleProposal response type. +message MsgSkipUploaderRoleResponse {} + +// MsgUpdateParams defines a SDK message for updating the module parameters. +message MsgUpdateParams { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // payload defines the x/bundles parameters to update. + string payload = 2; +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +message MsgUpdateParamsResponse {} diff --git a/proto/kyve/delegation/v1beta1/delegation.proto b/proto/kyve/delegation/v1beta1/delegation.proto new file mode 100644 index 00000000..4384cbb8 --- /dev/null +++ b/proto/kyve/delegation/v1beta1/delegation.proto @@ -0,0 +1,123 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +// Delegator stores the information that one address has delegated to another address +// It stores important information for the F1-Fee distribution algorithm +message Delegator { + // staker corresponds to a KYVE-staker on the protocol-side + string staker = 1; + // delegator the user who delegate to the staker. + // If staker and delegator are the same we call it: self-delegation + string delegator = 2; + // k_index is an internal index for the f1-distribution algorithm + uint64 k_index = 3; + // initial_amount of stake the user had when it delegated. + // slashes can cause that the actual stake is lower. + uint64 initial_amount = 4; +} + +// DelegationEntry represents an entry according to the F1-Fee-Distribution algorithm. +// Take a look at x/delegation/keeper/logic_f1distribution.go for more details +message DelegationEntry { + // staker on protocol level + string staker = 1; + + // k_index is the of the period this entry ends + uint64 k_index = 2; + + // value is the quotient of collected rewards and total stake according to F1-distribution + string value = 3 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; +} + +// DelegationPoolData stores general delegation information for every staker +message DelegationData { + // Primary keys + + // Every staker has one DelegationData + string staker = 1; + + // F1Distribution + + // current_rewards ... + uint64 current_rewards = 2; + // total_delegation ... + uint64 total_delegation = 3; + // latest_index_k ... 
+ uint64 latest_index_k = 4; + + // Stats + // These are not required by the f1-algorithm and are + // used for queries and state cleanup + + // delegator_count the amount of different addresses delegating to the staker + uint64 delegator_count = 5; + // latest_index_was_undelegation helps indicates when an entry can be deleted + bool latest_index_was_undelegation = 6; +} + +// DelegationSlash represents an f1-slash +// these entries needs to be iterated to obtain the current amount of the actual stake +// Every staker can have n slash-entries +message DelegationSlash { + // staker who got slashed + string staker = 1; + // k_index for f1-algorithm + uint64 k_index = 2; + // fraction that got slashed + string fraction = 3 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; +} + +// UndelegationQueueEntry ... +message UndelegationQueueEntry { + // index ... + uint64 index = 1; + // staker ... + string staker = 2; + // delegator ... + string delegator = 3; + // amount ... + uint64 amount = 4; + // creation_time ... + uint64 creation_time = 5; +} + +// QueueState ... +message QueueState { + // low_index ... + uint64 low_index = 1; + // high_index ... + uint64 high_index = 2; +} + +// RedelegationCooldown ... +message RedelegationCooldown { + // low_index ... + string address = 1; + // high_index ... + uint64 creation_date = 2; +} + +// SlashType ... +enum SlashType { + option (gogoproto.goproto_enum_prefix) = false; + + // SLASH_TYPE_UNSPECIFIED ... + SLASH_TYPE_UNSPECIFIED = 0; + // SLASH_TYPE_TIMEOUT ... + SLASH_TYPE_TIMEOUT = 1; + // SLASH_TYPE_VOTE ... + SLASH_TYPE_VOTE = 2; + // SLASH_TYPE_UPLOAD ... + SLASH_TYPE_UPLOAD = 3; +} diff --git a/proto/kyve/delegation/v1beta1/events.proto b/proto/kyve/delegation/v1beta1/events.proto new file mode 100644 index 00000000..13b45fbd --- /dev/null +++ b/proto/kyve/delegation/v1beta1/events.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +import "kyve/delegation/v1beta1/delegation.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +// ---------- Delegating Events ---------- + +// EventDelegate is an event emitted when someone delegates to a protocol node. +// emitted_by: MsgDelegate +message EventDelegate { + // address is the account address of the delegator. + string address = 1; + // staker is the account address of the protocol node. + string staker = 2; + // amount ... + uint64 amount = 3; +} + +// EventUndelegate is an event emitted when someone undelegates from a protocol node. +// emitted_by: EndBlock +message EventUndelegate { + // address is the account address of the delegator. + string address = 1; + // staker is the account address of the protocol node. + string staker = 2; + // amount ... + uint64 amount = 3; +} + +// EventRedelegate is an event emitted when someone redelegates from one protocol node to another. +// emitted_by: MsgRedelegate +message EventRedelegate { + // address is the account address of the delegator. + string address = 1; + // from_staker ... + string from_staker = 2; + // to_staker is the account address of the new staker in the the pool + string to_staker = 3; + // amount ... + uint64 amount = 4; +} + +// EventWithdrawRewards ... +// emitted_by: MsgRedelegate, MsgDelegate, MsgWithdrawRewards, EndBlock +message EventWithdrawRewards { + // address is the account address of the delegator. + string address = 1; + // staker is the account address of the protocol node the users withdraws from. 
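DelegationEntry, DelegationData and DelegationSlash above are the building blocks of the F1 fee-distribution bookkeeping referenced in the comments (the implementation is in x/delegation/keeper/logic_f1distribution.go, elsewhere in this patch). The sketch below restates the core idea in plain Go under simplifying assumptions: each entry stores the cumulative rewards-per-stake value at the end of a period, so a delegator's pending reward is its stake multiplied by the difference of two entries, and slashes are applied multiplicatively via their fractions. Variable names, the use of float64 instead of sdk.Dec, and the absence of rounding rules are illustration-only assumptions.

package main

import "fmt"

// pendingReward computes a delegator's reward between the entry at which it
// delegated (value at k1) and the current entry (value at k2), following the
// F1 idea: reward = stake * (value[k2] - value[k1]), where value plays the
// role of DelegationEntry.value (cumulative rewards / total delegation).
func pendingReward(stake, valueK1, valueK2 float64) float64 {
	return stake * (valueK2 - valueK1)
}

// effectiveStake applies the recorded slashes that happened after the
// delegator joined; each DelegationSlash.fraction scales the stake down.
func effectiveStake(initialAmount float64, slashFractions []float64) float64 {
	stake := initialAmount
	for _, f := range slashFractions {
		stake *= 1 - f
	}
	return stake
}

func main() {
	stake := effectiveStake(1000, []float64{0.01}) // one 1% slash
	fmt.Println(stake)                             // 990
	fmt.Println(pendingReward(stake, 0.25, 0.75))  // 990 * 0.50 = 495
}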
+ string staker = 2; + // amount ... + uint64 amount = 3; +} + +// EventSlash is an event emitted when a protocol node is slashed. +// emitted_by: MsgSubmitBundleProposal, EndBlock +message EventSlash { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the account address of the protocol node. + string staker = 2; + // amount ... + uint64 amount = 3; + // slash_type + SlashType slash_type = 4; +} diff --git a/proto/kyve/delegation/v1beta1/genesis.proto b/proto/kyve/delegation/v1beta1/genesis.proto new file mode 100644 index 00000000..4038bdc7 --- /dev/null +++ b/proto/kyve/delegation/v1beta1/genesis.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/delegation/v1beta1/delegation.proto"; +import "kyve/delegation/v1beta1/params.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +// GenesisState defines the delegation module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false]; + // delegator_list ... + repeated Delegator delegator_list = 2 [(gogoproto.nullable) = false]; + // delegation_entry_list ... + repeated DelegationEntry delegation_entry_list = 3 [(gogoproto.nullable) = false]; + // delegation_data_list ... + repeated DelegationData delegation_data_list = 4 [(gogoproto.nullable) = false]; + // delegation_slash_list ... + repeated DelegationSlash delegation_slash_list = 5 [(gogoproto.nullable) = false]; + // undelegation_queue_entry_list ... + repeated UndelegationQueueEntry undelegation_queue_entry_list = 6 [(gogoproto.nullable) = false]; + // queue_state_undelegation ... + QueueState queue_state_undelegation = 7 [(gogoproto.nullable) = false]; + // redelegation_cooldown_list ... + repeated RedelegationCooldown redelegation_cooldown_list = 8 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/delegation/v1beta1/params.proto b/proto/kyve/delegation/v1beta1/params.proto new file mode 100644 index 00000000..1cf3053b --- /dev/null +++ b/proto/kyve/delegation/v1beta1/params.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +// Params defines the delegation module parameters. +message Params { + // unbonding_delegation_time ... + uint64 unbonding_delegation_time = 1; + // unbonding_delegation_time ... + uint64 redelegation_cooldown = 2; + // unbonding_delegation_time ... + uint64 redelegation_max_amount = 3; + // vote_slash ... + string vote_slash = 4; + // upload_slash ... + string upload_slash = 5; + // timeout_slash ... + string timeout_slash = 6; +} diff --git a/proto/kyve/delegation/v1beta1/query.proto b/proto/kyve/delegation/v1beta1/query.proto new file mode 100644 index 00000000..99ac0940 --- /dev/null +++ b/proto/kyve/delegation/v1beta1/query.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/delegation/v1beta1/params.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +/** + Contains the native params query. + Everything else is provided by the query module. The reason for this + is that a lot of queries require composite data from all modules. +*/ + +// Query defines the gRPC querier service. +service Query { + // Parameters queries the parameters of the module. 
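params.proto for x/delegation above configures three slash fractions (vote_slash, upload_slash, timeout_slash), and SlashType in delegation.proto enumerates the matching offence types. The hedged sketch below shows how a SlashType could be mapped to its configured fraction and a resulting slash amount; the name mapping follows directly from the definitions, but the rounding rules and the destination of slashed funds are not specified in this hunk and are therefore left out. The decimals are represented as float64 here purely for brevity (the chain stores them as decimal strings).

package main

import "fmt"

// SlashType mirrors kyve/delegation/v1beta1/delegation.proto.
type SlashType int32

const (
	SlashTypeUnspecified SlashType = 0
	SlashTypeTimeout     SlashType = 1
	SlashTypeVote        SlashType = 2
	SlashTypeUpload      SlashType = 3
)

// params holds the three slash fractions from the x/delegation Params message.
type params struct {
	VoteSlash, UploadSlash, TimeoutSlash float64
}

// slashFraction picks the configured fraction for a given offence type.
func slashFraction(p params, t SlashType) float64 {
	switch t {
	case SlashTypeVote:
		return p.VoteSlash
	case SlashTypeUpload:
		return p.UploadSlash
	case SlashTypeTimeout:
		return p.TimeoutSlash
	default:
		return 0
	}
}

func main() {
	p := params{VoteSlash: 0.01, UploadSlash: 0.02, TimeoutSlash: 0.005} // illustrative values
	stake := 1_000_000.0                                                // delegated amount in ukyve
	fmt.Println(stake * slashFraction(p, SlashTypeVote))                // 10000 ukyve slashed for a wrong vote
}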
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/kyve/delegation/v1beta1/params"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. + Params params = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/delegation/v1beta1/tx.proto b/proto/kyve/delegation/v1beta1/tx.proto new file mode 100644 index 00000000..348aa4e7 --- /dev/null +++ b/proto/kyve/delegation/v1beta1/tx.proto @@ -0,0 +1,88 @@ +syntax = "proto3"; + +package kyve.delegation.v1beta1; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/delegation/types"; + +// Msg defines the Msg service. +service Msg { + // Delegate ... + rpc Delegate(MsgDelegate) returns (MsgDelegateResponse); + // Withdraw ... + rpc WithdrawRewards(MsgWithdrawRewards) returns (MsgWithdrawRewardsResponse); + // Undelegate ... + rpc Undelegate(MsgUndelegate) returns (MsgUndelegateResponse); + // Redelegate ... + rpc Redelegate(MsgRedelegate) returns (MsgRedelegateResponse); + + // UpdateParams defines a governance operation for updating the x/delegation module + // parameters. The authority is hard-coded to the x/gov module account. + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} + +// MsgDelegate ... +message MsgDelegate { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // amount ... + uint64 amount = 3; +} + +// MsgDelegatePoolResponse defines the Msg/DelegatePool response type. +message MsgDelegateResponse {} + +// MsgWithdrawPool defines a SDK message for withdrawing delegation rewards from a specific pool. +message MsgWithdrawRewards { + // creator ... + string creator = 1; + // staker ... + string staker = 2; +} + +// MsgWithdrawPoolResponse defines the Msg/WithdrawPool response type. +message MsgWithdrawRewardsResponse {} + +// MsgUndelegatePool defines a SDK message for undelegating from a specific pool. +message MsgUndelegate { + // creator ... + string creator = 1; + // staker ... + string staker = 2; + // amount ... + uint64 amount = 3; +} + +// MsgUndelegatePoolResponse defines the Msg/UndelegatePool response type. +message MsgUndelegateResponse {} + +// MsgRedelegatePool defines a SDK message for redelegating from a +// staker in a pool to another staker in the same or another pool +message MsgRedelegate { + // creator ... + string creator = 1; + // staker ... + string from_staker = 2; + // staker ... + string to_staker = 3; + // amount ... + uint64 amount = 4; +} + +// MsgUndelegatePoolResponse defines the Msg/UndelegatePool response type. +message MsgRedelegateResponse {} + +// MsgUpdateParams defines a SDK message for updating the module parameters. +message MsgUpdateParams { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // payload defines the x/delegation parameters to update. + string payload = 2; +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. 
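Several modules in this patch (x/bundles, x/delegation, x/global) expose a MsgUpdateParams whose authority is the x/gov module account and whose payload is a plain string. Assuming the payload is a JSON document describing the parameters to change (the handlers that parse it, such as the msg_server_update_params.go files listed in the diffstat, live elsewhere in this patch), governance tooling might assemble the message roughly as below. The JSON key names, the values, and the gov authority address are placeholders, not values taken from this hunk.

package main

import (
	"encoding/json"
	"fmt"
)

// MsgUpdateParams is a stand-in for the generated x/delegation type; the two
// fields correspond to the proto definition above.
type MsgUpdateParams struct {
	Authority string `json:"authority"`
	Payload   string `json:"payload"`
}

func main() {
	// Hypothetical subset of x/delegation params to update via governance.
	payload, err := json.Marshal(map[string]string{
		"unbonding_delegation_time": "432000", // assumed to be seconds
		"vote_slash":                "0.02",
	})
	if err != nil {
		panic(err)
	}

	msg := MsgUpdateParams{
		Authority: "kyve1...govmoduleaccount", // placeholder gov module address
		Payload:   string(payload),
	}
	fmt.Printf("%+v\n", msg)
	// The message would then be embedded in a governance proposal and voted on,
	// rather than signed by a regular account.
}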
+message MsgUpdateParamsResponse {} diff --git a/proto/kyve/global/v1beta1/genesis.proto b/proto/kyve/global/v1beta1/genesis.proto new file mode 100644 index 00000000..5eabbce5 --- /dev/null +++ b/proto/kyve/global/v1beta1/genesis.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package kyve.global.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/global/v1beta1/global.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/global/types"; + +// GenesisState defines the global module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/global/v1beta1/global.proto b/proto/kyve/global/v1beta1/global.proto new file mode 100644 index 00000000..91b567ab --- /dev/null +++ b/proto/kyve/global/v1beta1/global.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package kyve.global.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/global/types"; + +// Params defines the global module parameters. +message Params { + // min_gas_price defines the minimum gas price value for all transactions. + string min_gas_price = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; + // burn_ratio defines the ratio of transaction fees burnt. + string burn_ratio = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; + + // gas_adjustments can add a constant amount of gas to a specific message type. + // This gives more control to make certain messages more expensive to avoid spamming + // of certain types of messages. + repeated GasAdjustment gas_adjustments = 3 [(gogoproto.nullable) = false]; + + // gas_refunds lets the governance specify a fraction of how much gas + // a user gets refunded for a certain type of transaction. + // This could be used to make transactions which support to network cheaper. + // Gas refunds only work if the transaction only included one message. + repeated GasRefund gas_refunds = 4 [(gogoproto.nullable) = false]; + + // min_initial_deposit_ratio sets a minimum fraction of initial deposit for a + // governance proposal. This is used to avoid spamming of proposals and + // polluting the proposals page. + string min_initial_deposit_ratio = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; +} + +// GasAdjustment stores for every message type a fixed amount +// of gas which is added to the message +message GasAdjustment { + // type of the sdk-message + string type = 1; + // amount of gas which is added to the message + uint64 amount = 2; +} + +// GasRefund stores the fraction of gas which will be refunded for a given +// type of message. +// This only works if the transaction only includes one message. 
+message GasRefund { + // type of the sdk-message + string type = 1; + // fraction in decimal representation between 0 and 1 + string fraction = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; +} diff --git a/proto/kyve/global/v1beta1/query.proto b/proto/kyve/global/v1beta1/query.proto new file mode 100644 index 00000000..1bf65f0d --- /dev/null +++ b/proto/kyve/global/v1beta1/query.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package kyve.global.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/global/v1beta1/global.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/global/types"; + +// Query defines the gRPC querier service. +service Query { + // Parameters queries the parameters of the module. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/kyve/global/v1beta1/params"; + } +} + +// QueryParamsRequest is request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is response type for the Query/Params RPC method. +message QueryParamsResponse { + // params holds all the parameters of this module. + Params params = 1 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/global/v1beta1/tx.proto b/proto/kyve/global/v1beta1/tx.proto new file mode 100644 index 00000000..e0e3f31b --- /dev/null +++ b/proto/kyve/global/v1beta1/tx.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package kyve.global.v1beta1; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/global/types"; + +// Msg defines the Msg service. +service Msg { + // UpdateParams defines a governance operation for updating the x/global + // module parameters. The authority is hard-coded to the x/gov module + // account. + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} + +// MsgUpdateParams defines a SDK message for updating the module parameters. +message MsgUpdateParams { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + + // payload defines the x/global parameters to update. + string payload = 2; +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +message MsgUpdateParamsResponse {} diff --git a/proto/kyve/pool/v1beta1/events.proto b/proto/kyve/pool/v1beta1/events.proto new file mode 100644 index 00000000..12e2ce0b --- /dev/null +++ b/proto/kyve/pool/v1beta1/events.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package kyve.pool.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/pool/types"; + +// EventCreatePool ... +// emitted_by: EndBlock(gov) +message EventCreatePool { + // id is the unique ID of the pool. 
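The x/global Params above introduce per-message gas_adjustments (a flat gas surcharge per message type) and, via the GasRefund message that follows, a refund fraction returned for single-message transactions, alongside min_gas_price and burn_ratio. The ante/post handlers that enforce this live in x/global and app/ante.go, not in this hunk; the sketch below only illustrates the arithmetic the field comments describe. The concrete numbers, the example type URLs, and the choice of the paid fee as the refund base are assumptions.

package main

import "fmt"

// gasAdjustment and gasRefund mirror the GasAdjustment and GasRefund messages:
// a flat gas surcharge and a refund fraction, both keyed by sdk message type.
type gasAdjustment struct {
	Type   string
	Amount uint64
}

type gasRefund struct {
	Type     string
	Fraction float64 // decimal between 0 and 1
}

// adjustedGas adds the configured surcharge for msgType, if any.
func adjustedGas(base uint64, msgType string, adjustments []gasAdjustment) uint64 {
	for _, a := range adjustments {
		if a.Type == msgType {
			return base + a.Amount
		}
	}
	return base
}

// refundedFee returns the portion of the paid fee handed back for a
// single-message transaction of msgType (zero for everything else).
func refundedFee(fee float64, msgType string, refunds []gasRefund) float64 {
	for _, r := range refunds {
		if r.Type == msgType {
			return fee * r.Fraction
		}
	}
	return 0
}

func main() {
	// Illustrative type URLs taken from messages defined in this patch.
	adj := []gasAdjustment{{Type: "/kyve.pool.v1beta1.MsgFundPool", Amount: 50_000}}
	ref := []gasRefund{{Type: "/kyve.bundles.v1beta1.MsgSubmitBundleProposal", Fraction: 0.5}}

	fmt.Println(adjustedGas(200_000, "/kyve.pool.v1beta1.MsgFundPool", adj))              // 250000
	fmt.Println(refundedFee(1000, "/kyve.bundles.v1beta1.MsgSubmitBundleProposal", ref)) // 500
}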
+ uint64 id = 1; + // name is the human readable name of the pool + string name = 2; + // runtime is the runtime name of the pool + string runtime = 3; + // logo is the logo url of the pool + string logo = 4; + // config is either a json stringified config or an + // external link pointing to the config + string config = 5; + // start_key is the first key the pool should start + // indexing + string start_key = 6; + // upload_interval is the interval the pool should validate + // bundles with + uint64 upload_interval = 7; + // operating_cost is the fixed cost which gets paid out + // to every successful uploader + uint64 operating_cost = 8; + // min_delegation is the minimum amount of $KYVE the pool has + // to have in order to produce bundles + uint64 min_delegation = 9; + // max_bundle_size is the max size a data bundle can have + // (amount of data items) + uint64 max_bundle_size = 10; + // version is the current version of the protocol nodes + string version = 11; + // binaries points to the current binaries of the protocol node + string binaries = 12; + // storage_provider_id is the unique id of the storage provider + // the pool is archiving the data on + uint32 storage_provider_id = 13; + // compression_id is the unique id of the compression type the bundles + // get compressed with + uint32 compression_id = 14; +} + +// EventFundPool is an event emitted when a pool is funded. +// emitted_by: MsgFundPool +message EventFundPool { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // address is the account address of the pool funder. + string address = 2; + // amount is the amount in ukyve the funder has funded + uint64 amount = 3; +} + +// EventDefundPool is an event emitted when a pool is defunded. +// emitted_by: MsgDefundPool +message EventDefundPool { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // address is the account address of the pool funder. + string address = 2; + // amount is the amount in ukyve the funder has defunded + uint64 amount = 3; +} + +// EventDefundPool is an event emitted when a pool is defunded. +// emitted_by: MsgSubmitBundleProposal +message EventPoolFundsSlashed { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // address is the account address of the pool funder. + string address = 2; + // amount is the amount in ukyve the validator has lost due to the slash + uint64 amount = 3; +} + +// EventPoolOutOfFunds is an event emitted when a pool has run out of funds +// emitted_by: MsgSubmitBundleProposal +message EventPoolOutOfFunds { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; +} diff --git a/proto/kyve/pool/v1beta1/genesis.proto b/proto/kyve/pool/v1beta1/genesis.proto new file mode 100644 index 00000000..0051e4ac --- /dev/null +++ b/proto/kyve/pool/v1beta1/genesis.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package kyve.pool.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/pool/v1beta1/pool.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/pool/types"; + +// GenesisState defines the pool module's genesis state. +message GenesisState { + reserved 1; + reserved "params"; + + // pool_list ... + repeated kyve.pool.v1beta1.Pool pool_list = 2 [(gogoproto.nullable) = false]; + // pool_count ... 
+ uint64 pool_count = 3; +} diff --git a/proto/kyve/pool/v1beta1/pool.proto b/proto/kyve/pool/v1beta1/pool.proto new file mode 100644 index 00000000..77541e34 --- /dev/null +++ b/proto/kyve/pool/v1beta1/pool.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +package kyve.pool.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/pool/types"; + +// PoolStatus ... +enum PoolStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // POOL_STATUS_UNSPECIFIED ... + POOL_STATUS_UNSPECIFIED = 0; + // POOL_STATUS_ACTIVE ... + POOL_STATUS_ACTIVE = 1; + // POOL_STATUS_DISABLED ... + POOL_STATUS_DISABLED = 2; + // POOL_STATUS_NO_FUNDS ... + POOL_STATUS_NO_FUNDS = 3; + // POOL_STATUS_NOT_ENOUGH_DELEGATION ... + POOL_STATUS_NOT_ENOUGH_DELEGATION = 4; + // POOL_STATUS_UPGRADING ... + POOL_STATUS_UPGRADING = 5; +} + +// Protocol holds all info about the current pool version and the +// available binaries for participating as a validator in a pool +message Protocol { + // version holds the current software version tag of the pool binaries + string version = 1; + // binaries is a stringified json object which holds binaries in the + // current version for multiple platforms and architectures + string binaries = 2; + // last_upgrade is the unix time the pool was upgraded the last time + uint64 last_upgrade = 3; +} + +// Upgrade holds all info when a pool has a scheduled upgrade +message UpgradePlan { + // version is the new software version tag of the upgrade + string version = 1; + // binaries is the new stringified json object which holds binaries in the + // upgrade version for multiple platforms and architectures + string binaries = 2; + // scheduled_at is the unix time the upgrade is supposed to be done + uint64 scheduled_at = 3; + // duration is the time in seconds how long the pool should halt + // during the upgrade to give all validators a chance of switching + // to the new binaries + uint64 duration = 4; +} + +// Funder is the object which holds info about a single pool funder +message Funder { + // address is the address of the funder + string address = 1; + // amount is the current amount of funds in ukyve the funder has + // still funded the pool with + uint64 amount = 2; +} + +// Pool ... +message Pool { + // id - unique identifier of the pool, can not be changed + uint64 id = 1; + // name is a human readable name for the pool + string name = 2; + // runtime specified which protocol and which version needs is required + string runtime = 3; + // logo is a link to an image file + string logo = 4; + // config is either a JSON encoded string or a link to an external storage provider. + // This is up to the implementation of the protocol node. + string config = 5; + + // start_key ... + string start_key = 6; + // current_key ... + string current_key = 7; + // current_summary ... + string current_summary = 8; + // current_index ... + uint64 current_index = 9; + + // total_bundles is the number of total finalized bundles + uint64 total_bundles = 10; + + // upload_interval ... + uint64 upload_interval = 11; + // operating_cost ... + uint64 operating_cost = 12; + // min_delegation ... + uint64 min_delegation = 13; + // max_bundle_size ... + uint64 max_bundle_size = 14; + + // disabled is true when the pool is disabled. + // Can only be done via governance. + bool disabled = 15; + + // funders ... + repeated Funder funders = 16; + // total_funds ... + uint64 total_funds = 17; + + // protocol ... + Protocol protocol = 18; + // upgrade_plan ... 
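pool.proto below defines both the Pool object and the PoolStatus enum, and the field comments (disabled, total_funds, min_delegation, upgrade_plan) hint at how a status is derived. The derivation itself happens in the keeper/query layer, which is not part of this hunk; the Go sketch that follows is one plausible ordering of those checks and should be read as an assumption, not as the chain's rule. In particular, the total delegation is supplied by x/delegation and is not a Pool field.

package main

import "fmt"

// PoolStatus mirrors kyve/pool/v1beta1/pool.proto.
type PoolStatus int32

const (
	PoolStatusUnspecified         PoolStatus = 0
	PoolStatusActive              PoolStatus = 1
	PoolStatusDisabled            PoolStatus = 2
	PoolStatusNoFunds             PoolStatus = 3
	PoolStatusNotEnoughDelegation PoolStatus = 4
	PoolStatusUpgrading           PoolStatus = 5
)

// poolView carries only the fields needed for the status decision.
type poolView struct {
	Disabled         bool
	UpgradeScheduled bool   // derived from a non-empty UpgradePlan
	TotalFunds       uint64 // sum of all Funder amounts
	TotalDelegation  uint64 // provided by x/delegation, not stored in Pool
	MinDelegation    uint64
}

// status applies the checks in an assumed priority order.
func status(p poolView) PoolStatus {
	switch {
	case p.Disabled:
		return PoolStatusDisabled
	case p.UpgradeScheduled:
		return PoolStatusUpgrading
	case p.TotalFunds == 0:
		return PoolStatusNoFunds
	case p.TotalDelegation < p.MinDelegation:
		return PoolStatusNotEnoughDelegation
	default:
		return PoolStatusActive
	}
}

func main() {
	fmt.Println(status(poolView{TotalFunds: 1, TotalDelegation: 100, MinDelegation: 50})) // 1 (active)
}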
+ UpgradePlan upgrade_plan = 19; + + // storage_provider_id ... + uint32 current_storage_provider_id = 20; + // compression_id ... + uint32 current_compression_id = 21; +} diff --git a/proto/kyve/pool/v1beta1/tx.proto b/proto/kyve/pool/v1beta1/tx.proto new file mode 100644 index 00000000..4ca6223d --- /dev/null +++ b/proto/kyve/pool/v1beta1/tx.proto @@ -0,0 +1,160 @@ +syntax = "proto3"; + +package kyve.pool.v1beta1; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/pool/types"; + +// Msg defines the Msg service. +service Msg { + // FundPool ... + rpc FundPool(MsgFundPool) returns (MsgFundPoolResponse); + // DefundPool ... + rpc DefundPool(MsgDefundPool) returns (MsgDefundPoolResponse); + + // CreatePool defines a governance operation for creating a new pool. + // The authority is hard-coded to the x/gov module account. + rpc CreatePool(MsgCreatePool) returns (MsgCreatePoolResponse); + // UpdatePool defines a governance operation for updating an existing pool. + // The authority is hard-coded to the x/gov module account. + rpc UpdatePool(MsgUpdatePool) returns (MsgUpdatePoolResponse); + // DisablePool defines a governance operation for disabling an existing pool. + // The authority is hard-coded to the x/gov module account. + rpc DisablePool(MsgDisablePool) returns (MsgDisablePoolResponse); + // EnablePool defines a governance operation for enabling an existing pool. + // The authority is hard-coded to the x/gov module account. + rpc EnablePool(MsgEnablePool) returns (MsgEnablePoolResponse); + // ScheduleRuntimeUpgrade defines a governance operation for scheduling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + rpc ScheduleRuntimeUpgrade(MsgScheduleRuntimeUpgrade) returns (MsgScheduleRuntimeUpgradeResponse); + // CancelRuntimeUpgrade defines a governance operation for cancelling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + rpc CancelRuntimeUpgrade(MsgCancelRuntimeUpgrade) returns (MsgCancelRuntimeUpgradeResponse); +} + +// MsgFundPool defines a SDK message for funding a pool. +message MsgFundPool { + // creator ... + string creator = 1; + // id ... + uint64 id = 2; + // amount ... + uint64 amount = 3; +} + +// MsgFundPoolResponse defines the Msg/DefundPool response type. +message MsgFundPoolResponse {} + +// MsgDefundPool defines a SDK message for defunding a pool. +message MsgDefundPool { + // creator ... + string creator = 1; + // id ... + uint64 id = 2; + // amount ... + uint64 amount = 3; +} + +// MsgDefundPoolResponse defines the Msg/DefundPool response type. +message MsgDefundPoolResponse {} + +// MsgCreatePool defines a SDK message for creating a new pool. +message MsgCreatePool { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // name ... + string name = 2; + // runtime ... + string runtime = 3; + // logo ... + string logo = 4; + // config ... + string config = 5; + // start_key ... + string start_key = 6; + // upload_interval ... + uint64 upload_interval = 7; + // operating_cost ... + uint64 operating_cost = 8; + // min_delegation ... + uint64 min_delegation = 9; + // max_bundle_size ... + uint64 max_bundle_size = 10; + // version ... + string version = 11; + // binaries ... + string binaries = 12; + // storage_provider_id ... + uint32 storage_provider_id = 13; + // compression_id ... 
+ uint32 compression_id = 14; +} + +// MsgCreatePoolResponse defines the Msg/CreatePool response type. +message MsgCreatePoolResponse {} + +// MsgUpdatePool defines a SDK message for updating an existing pool. +message MsgUpdatePool { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id ... + uint64 id = 2; + // payload ... + string payload = 3; +} + +// MsgUpdatePoolResponse defines the Msg/UpdatePool response type. +message MsgUpdatePoolResponse {} + +// MsgDisablePool defines a SDK message for disabling an existing pool. +message MsgDisablePool { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id ... + uint64 id = 2; +} + +// MsgDisablePoolResponse defines the Msg/DisablePool response type. +message MsgDisablePoolResponse {} + +// MsgEnablePool defines a SDK message for enabling an existing pool. +message MsgEnablePool { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id ... + uint64 id = 2; +} + +// MsgEnablePoolResponse defines the Msg/EnablePool response type. +message MsgEnablePoolResponse {} + +// MsgScheduleRuntimeUpgrade defines a SDK message for scheduling a runtime upgrade. +message MsgScheduleRuntimeUpgrade { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // runtime ... + string runtime = 2; + // version ... + string version = 3; + // scheduled_at ... + uint64 scheduled_at = 4; + // duration ... + uint64 duration = 5; + // binaries ... + string binaries = 6; +} + +// MsgScheduleRuntimeUpgradeResponse defines the Msg/ScheduleRuntimeUpgrade response type. +message MsgScheduleRuntimeUpgradeResponse {} + +// MsgCancelRuntimeUpgrade defines a SDK message for cancelling a runtime upgrade. +message MsgCancelRuntimeUpgrade { + // authority is the address of the governance account. + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // runtime ... + string runtime = 2; +} + +// MsgCancelRuntimeUpgradeResponse defines the Msg/CancelRuntimeUpgrade response type. +message MsgCancelRuntimeUpgradeResponse {} diff --git a/proto/kyve/query/v1beta1/account.proto b/proto/kyve/query/v1beta1/account.proto new file mode 100644 index 00000000..cb0dd48a --- /dev/null +++ b/proto/kyve/query/v1beta1/account.proto @@ -0,0 +1,146 @@ +syntax = "proto3"; + +package kyve.query.v1beta1; + +import "cosmos/base/query/v1beta1/pagination.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/query/v1beta1/query.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/query/types"; + +// QueryDelegation contains all rpc requests related to direct delegation data +service QueryAccount { + // AccountAssets returns an overview of the sum of all balances for a given user. e.g. balance, staking, funding, etc. + rpc AccountAssets(QueryAccountAssetsRequest) returns (QueryAccountAssetsResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/account_assets/{address}"; + } + + // AccountDelegationUnbondings ... + rpc AccountDelegationUnbondings(QueryAccountDelegationUnbondingsRequest) returns (QueryAccountDelegationUnbondingsResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/account_delegation_unbondings/{address}"; + } + + // AccountFundedList returns all pools the given user has funded into. 
+  rpc AccountFundedList(QueryAccountFundedListRequest) returns (QueryAccountFundedListResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/account_funded_list/{address}";
+  }
+
+  // AccountRedelegation ...
+  rpc AccountRedelegation(QueryAccountRedelegationRequest) returns (QueryAccountRedelegationResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/account_redelegation/{address}";
+  }
+}
+
+// ========================
+// account_assets/{address}
+// ========================
+
+// QueryAccountAssetsRequest is the request type for the Query/AccountAssets RPC method.
+message QueryAccountAssetsRequest {
+  // address ...
+  string address = 1;
+}
+
+// QueryAccountAssetsResponse is the response type for the Query/AccountAssets RPC method.
message QueryAccountAssetsResponse {
+  // balance ...
+  uint64 balance = 1;
+  // protocol_self_delegation ...
+  uint64 protocol_self_delegation = 2;
+  // protocol_self_delegation_unbonding ...
+  uint64 protocol_self_delegation_unbonding = 3;
+  // protocol_delegation ...
+  uint64 protocol_delegation = 4;
+  // protocol_delegation_unbonding ...
+  uint64 protocol_delegation_unbonding = 5;
+  // protocol_rewards ...
+  uint64 protocol_rewards = 6;
+  // protocol_funding ...
+  uint64 protocol_funding = 7;
+}
+
+// =======================================
+// account_delegation_unbondings/{address}
+// =======================================
+
+// QueryAccountDelegationUnbondingsRequest is the request type for the Query/AccountDelegationUnbondings RPC method.
+message QueryAccountDelegationUnbondingsRequest {
+  // pagination defines an optional pagination for the request.
+  cosmos.base.query.v1beta1.PageRequest pagination = 1;
+  // address ...
+  string address = 2;
+}
+
+// QueryAccountDelegationUnbondingsResponse is the response type for the Query/AccountDelegationUnbondings RPC method.
+message QueryAccountDelegationUnbondingsResponse {
+  // unbondings ...
+  repeated DelegationUnbonding unbondings = 1 [(gogoproto.nullable) = false];
+  // pagination defines the pagination in the response.
+  cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// DelegationUnbonding represents a single pending delegation unbonding entry.
+message DelegationUnbonding {
+  // amount
+  uint64 amount = 1;
+  // creation_time
+  uint64 creation_time = 2;
+  // staker
+  FullStaker staker = 3;
+}
+
+// =============================
+// account_funded_list/{address}
+// =============================
+
+// QueryAccountFundedListRequest is the request type for the account queries with pagination
+message QueryAccountFundedListRequest {
+  // address ...
+  string address = 1;
+  // pagination defines an optional pagination for the request.
+  // cosmos.base.query.v1beta1.PageRequest pagination = 2;
+}
+
+// QueryAccountFundedListResponse is the response type for the Query/AccountFundedList RPC method.
+message QueryAccountFundedListResponse {
+  // funded ...
+  repeated Funded funded = 1 [(gogoproto.nullable) = false];
+  // pagination defines the pagination in the response.
+  // cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// Funded ...
+message Funded {
+  // amount ...
+  uint64 amount = 1;
+  // pool ...
+  BasicPool pool = 2;
+}
+
+// ==============================
+// account_redelegation/{address}
+// ==============================
+
+// QueryAccountRedelegationRequest is the request type for the Query/AccountRedelegation RPC method.
+message QueryAccountRedelegationRequest {
+  // address ...
+  string address = 1;
+}
+
+// QueryAccountRedelegationResponse is the response type for the Query/AccountRedelegation RPC method.
+message QueryAccountRedelegationResponse {
+  // redelegation_cooldown_entries ...
+  repeated RedelegationEntry redelegation_cooldown_entries = 1 [(gogoproto.nullable) = false];
+
+  // available_slots ...
+  uint64 available_slots = 2;
+}
+
+// RedelegationEntry ...
+message RedelegationEntry {
+  // creation_date ...
+  uint64 creation_date = 1;
+  // finish_date ...
+  uint64 finish_date = 2;
+}
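As a rough illustration of how a client could consume the AccountAssets route declared above through the gRPC-gateway, here is a minimal Go sketch; the node REST address, an enabled API server and the example address are assumptions, and the JSON field casing depends on the gateway marshaler:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Path taken from the google.api.http option on QueryAccount/AccountAssets.
	// Host, port and the address below are placeholders for illustration.
	address := "kyve1exampleaddress"
	resp, err := http.Get("http://localhost:1317/kyve/query/v1beta1/account_assets/" + address)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body is the JSON form of QueryAccountAssetsResponse
	// (balance, protocol_self_delegation, protocol_rewards, ...).
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}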
diff --git a/proto/kyve/query/v1beta1/bundles.proto b/proto/kyve/query/v1beta1/bundles.proto
new file mode 100644
index 00000000..60b731f3
--- /dev/null
+++ b/proto/kyve/query/v1beta1/bundles.proto
@@ -0,0 +1,194 @@
+syntax = "proto3";
+
+package kyve.query.v1beta1;
+
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "kyve/bundles/v1beta1/bundles.proto";
+
+option go_package = "github.com/KYVENetwork/chain/x/query/types";
+
+// QueryBundles contains all rpc requests related to finalized bundles and bundle proposals
service QueryBundles {
+  // FinalizedBundles ...
+  rpc FinalizedBundles(QueryFinalizedBundlesRequest) returns (QueryFinalizedBundlesResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/finalized_bundles/{pool_id}";
+  }
+
+  // FinalizedBundle ...
+  rpc FinalizedBundle(QueryFinalizedBundleRequest) returns (QueryFinalizedBundleResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/finalized_bundle/{pool_id}/{id}";
+  }
+
+  // FinalizedBundlesByHeight queries the bundle which contains the data item with the given height
+  rpc FinalizedBundlesByHeight(QueryFinalizedBundlesByHeightRequest) returns (QueryFinalizedBundlesByHeightResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/finalized_bundle_by_height/{pool_id}/{height}";
+  }
+
+  // CurrentVoteStatus ...
+  rpc CurrentVoteStatus(QueryCurrentVoteStatusRequest) returns (QueryCurrentVoteStatusResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/current_vote_status/{pool_id}";
+  }
+
+  // CanValidate ...
+  rpc CanValidate(QueryCanValidateRequest) returns (QueryCanValidateResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/can_validate/{pool_id}/{valaddress}";
+  }
+
+  // CanPropose ...
+  rpc CanPropose(QueryCanProposeRequest) returns (QueryCanProposeResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/can_propose/{pool_id}/{staker}/{proposer}/{from_index}";
+  }
+
+  // CanVote checks if a voter on the given pool can still vote for the given bundle
+  rpc CanVote(QueryCanVoteRequest) returns (QueryCanVoteResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/can_vote/{pool_id}/{staker}/{voter}/{storage_id}";
+  }
+}
+
+// ===========================
+// finalized_bundles/{pool_id}
+// ===========================
+
+// QueryFinalizedBundlesRequest is the request type for the Query/FinalizedBundles RPC method.
+message QueryFinalizedBundlesRequest {
+  // pagination defines an optional pagination for the request.
+  cosmos.base.query.v1beta1.PageRequest pagination = 1;
+  // pool_id ...
+  uint64 pool_id = 2;
+}
+
+// QueryFinalizedBundlesResponse is the response type for the Query/FinalizedBundles RPC method.
+message QueryFinalizedBundlesResponse {
+  // finalized_bundles ...
+  repeated kyve.bundles.v1beta1.FinalizedBundle finalized_bundles = 1 [(gogoproto.nullable) = false];
+  // pagination defines the pagination in the response.
+  cosmos.base.query.v1beta1.PageResponse pagination = 2;
+}
+
+// ===============================
+// finalized_bundle/{pool_id}/{id}
+// ===============================
+
+// QueryFinalizedBundleRequest is the request type for the Query/FinalizedBundle RPC method.
+message QueryFinalizedBundleRequest {
+  // pool_id ...
+ uint64 pool_id = 1; + // id ... + uint64 id = 2; +} + +// QueryFinalizedBundleResponse is the response type for the Query/Staker RPC method. +message QueryFinalizedBundleResponse { + // finalized_bundle ... + kyve.bundles.v1beta1.FinalizedBundle finalized_bundle = 1 [(gogoproto.nullable) = false]; +} + +// =================================== +// finalized_bundle_by_height/{height} +// =================================== + +// QueryFinalizedBundleRequest is the request type for the Query/Staker RPC method. +message QueryFinalizedBundlesByHeightRequest { + // pool_id ... + uint64 pool_id = 1; + // id ... + uint64 height = 2; +} + +// QueryFinalizedBundleResponse is the response type for the Query/Staker RPC method. +message QueryFinalizedBundlesByHeightResponse { + // finalized_bundle ... + kyve.bundles.v1beta1.FinalizedBundle finalized_bundle = 1 [(gogoproto.nullable) = false]; +} + +// =============================== +// current_vote_status/{pool_id} +// =============================== + +// QueryCurrentVoteStatusRequest is the request type for the Query/Staker RPC method. +message QueryCurrentVoteStatusRequest { + // pool_id ... + uint64 pool_id = 1; +} + +// QueryCurrentVoteStatusResponse is the response type for the Query/Staker RPC method. +message QueryCurrentVoteStatusResponse { + // valid ... + uint64 valid = 1; + // invalid ... + uint64 invalid = 2; + // abstain ... + uint64 abstain = 3; + // total ... + uint64 total = 4; +} + +// =================================== +// can_validate/{pool_id}/{valaddress} +// =================================== + +// QueryCanProposeRequest is the request type for the Query/CanPropose RPC method. +message QueryCanValidateRequest { + // pool_id defines the unique ID of the pool. + uint64 pool_id = 1; + // valaddress ... + string valaddress = 2; +} + +// QueryCanProposeResponse is the response type for the Query/CanPropose RPC method. +message QueryCanValidateResponse { + // possible ... + bool possible = 1; + // reason ... + string reason = 2; +} + +// ====================================================== +// can_propose/{pool_id}/{staker}/{proposer}/{from_index} +// ====================================================== + +// QueryCanProposeRequest is the request type for the Query/CanPropose RPC method. +message QueryCanProposeRequest { + // pool_id defines the unique ID of the pool. + uint64 pool_id = 1; + // staker ... + string staker = 2; + // proposer ... + string proposer = 3; + // from_index ... + uint64 from_index = 4; +} + +// QueryCanProposeResponse is the response type for the Query/CanPropose RPC method. +message QueryCanProposeResponse { + // possible ... + bool possible = 1; + // reason ... + string reason = 2; +} + +// ================================================ +// can_vote/{pool_id}/{staker}/{voter}/{storage_id} +// ================================================ + +// QueryCanVoteRequest is the request type for the Query/CanVote RPC method. +message QueryCanVoteRequest { + // pool_id defines the unique ID of the pool. + uint64 pool_id = 1; + // staker ... + string staker = 2; + // voter ... + string voter = 3; + // storage_id ... + string storage_id = 4; +} + +// QueryCanVoteResponse is the response type for the Query/CanVote RPC method. +message QueryCanVoteResponse { + // possible ... + bool possible = 1; + // reason ... 
+ string reason = 2; +} diff --git a/proto/kyve/query/v1beta1/delegation.proto b/proto/kyve/query/v1beta1/delegation.proto new file mode 100644 index 00000000..b57a80e7 --- /dev/null +++ b/proto/kyve/query/v1beta1/delegation.proto @@ -0,0 +1,116 @@ +syntax = "proto3"; + +package kyve.query.v1beta1; + +import "cosmos/base/query/v1beta1/pagination.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/query/v1beta1/query.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/query/types"; + +// QueryDelegation contains all rpc requests related to direct delegation data +service QueryDelegation { + // Delegator returns delegation information for a specific delegator of a specific staker. + rpc Delegator(QueryDelegatorRequest) returns (QueryDelegatorResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/delegator/{staker}/{delegator}"; + } + + // DelegatorsByStaker returns all delegators that have delegated to the given staker + // This query is paginated. + rpc DelegatorsByStaker(QueryDelegatorsByStakerRequest) returns (QueryDelegatorsByStakerResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/delegators_by_staker/{staker}"; + } + + // StakersByPoolAndDelegator returns all stakers the given delegator has delegated to. + // This query is paginated. + rpc StakersByDelegator(QueryStakersByDelegatorRequest) returns (QueryStakersByDelegatorResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/stakers_by_delegator/{delegator}"; + } +} + +// ============================== +// delegator/{staker}/{delegator} +// ============================== + +// QueryDelegatorRequest is the request type for the Query/Delegator RPC method. +message QueryDelegatorRequest { + // staker ... + string staker = 1; + // delegator ... + string delegator = 2; +} + +// QueryDelegatorResponse is the response type for the Query/Delegator RPC method. +message QueryDelegatorResponse { + // delegator ... + StakerDelegatorResponse delegator = 1; +} + +// StakerDelegatorResponse ... +message StakerDelegatorResponse { + // delegator ... + string delegator = 1; + // current_reward ... + uint64 current_reward = 2; + // delegation_amount ... + uint64 delegation_amount = 3; + // staker ... + string staker = 4; +} + +// ============================= +// delegators_by_staker/{staker} +// ============================= + +// QueryDelegatorsByStakerRequest ... +message QueryDelegatorsByStakerRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; + // staker ... + string staker = 2; +} + +// QueryDelegatorsByStakerResponse ... +message QueryDelegatorsByStakerResponse { + // delegators ... + repeated StakerDelegatorResponse delegators = 1 [(gogoproto.nullable) = false]; + // total_delegation ... (consider metadata object) + uint64 total_delegation = 2; + // total_delegation ... + uint64 total_delegator_count = 3; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 4; +} + +// ================================ +// stakers_by_delegator/{delegator} +// ================================ + +// QueryStakersByDelegatorRequest ... +message QueryStakersByDelegatorRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; + // delegator ... + string delegator = 2; +} + +// QueryStakersByDelegatorResponse ... +message QueryStakersByDelegatorResponse { + // delegator ... 
+ string delegator = 1; + // stakers ... + repeated DelegationForStakerResponse stakers = 2 [(gogoproto.nullable) = false]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 3; +} + +// DelegationForStakerResponse ... +message DelegationForStakerResponse { + // staker ... + FullStaker staker = 1; + // current_reward ... + uint64 current_reward = 2; + // delegation_amount ... + uint64 delegation_amount = 3; +} diff --git a/proto/kyve/query/v1beta1/params.proto b/proto/kyve/query/v1beta1/params.proto new file mode 100644 index 00000000..69a0133d --- /dev/null +++ b/proto/kyve/query/v1beta1/params.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package kyve.query.v1beta1; + +import "cosmos/gov/v1/query.proto"; +import "google/api/annotations.proto"; +import "kyve/bundles/v1beta1/params.proto"; +import "kyve/delegation/v1beta1/params.proto"; +import "kyve/global/v1beta1/global.proto"; +import "kyve/stakers/v1beta1/params.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/query/types"; + +// QueryPool ... +service QueryParams { + // Pools queries for all pools. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/params"; + } +} + +// ======= +// /params +// ======= + +// QueryParamsRequest ... +message QueryParamsRequest {} + +// QueryParamsResponse ... +message QueryParamsResponse { + // bundles_params ... + kyve.bundles.v1beta1.Params bundles_params = 1; + // delegation_params ... + kyve.delegation.v1beta1.Params delegation_params = 2; + // global_params ... + kyve.global.v1beta1.Params global_params = 3; + // gov_params ... + cosmos.gov.v1.QueryParamsResponse gov_params = 4; + // stakers_params ... + kyve.stakers.v1beta1.Params stakers_params = 5; +} diff --git a/proto/kyve/query/v1beta1/pools.proto b/proto/kyve/query/v1beta1/pools.proto new file mode 100644 index 00000000..1bee7e2d --- /dev/null +++ b/proto/kyve/query/v1beta1/pools.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; + +package kyve.query.v1beta1; + +import "cosmos/base/query/v1beta1/pagination.proto"; +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/bundles/v1beta1/bundles.proto"; +import "kyve/pool/v1beta1/pool.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/query/types"; + +// QueryPool ... +service QueryPool { + // Pools queries for all pools. + rpc Pools(QueryPoolsRequest) returns (QueryPoolsResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/pools"; + } + + // Pool queries a pool by its Id. + rpc Pool(QueryPoolRequest) returns (QueryPoolResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/pool/{id}"; + } +} + +// ====== +// /pools +// ====== + +// QueryPoolsRequest is the request type for the Query/Pools RPC method. +message QueryPoolsRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; + // search ... + string search = 2; + // runtime ... + string runtime = 3; + // disabled ... + bool disabled = 4; + // storage_provider_id ... + uint32 storage_provider_id = 5; +} + +// QueryPoolsResponse is the response type for the Query/Pools RPC method. +message QueryPoolsResponse { + // pools ... + repeated PoolResponse pools = 1 [(gogoproto.nullable) = false]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// PoolResponse ... +message PoolResponse { + // id ... 
+  uint64 id = 1;
+  // data ...
+  kyve.pool.v1beta1.Pool data = 2;
+  // bundle_proposal ...
+  kyve.bundles.v1beta1.BundleProposal bundle_proposal = 3;
+  // stakers ...
+  repeated string stakers = 4;
+  // total_self_delegation ...
+  uint64 total_self_delegation = 5;
+  // total_delegation ...
+  uint64 total_delegation = 6;
+  // status ...
+  kyve.pool.v1beta1.PoolStatus status = 7;
+}
+
+// =========
+// pool/{id}
+// =========
+
+// QueryPoolRequest is the request type for the Query/Pool RPC method.
+message QueryPoolRequest {
+  // id defines the unique ID of the pool.
+  uint64 id = 1;
+}
+
+// QueryPoolResponse is the response type for the Query/Pool RPC method.
+message QueryPoolResponse {
+  // pool ...
+  PoolResponse pool = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/kyve/query/v1beta1/query.proto b/proto/kyve/query/v1beta1/query.proto
new file mode 100644
index 00000000..88fa9dfb
--- /dev/null
+++ b/proto/kyve/query/v1beta1/query.proto
@@ -0,0 +1,155 @@
+syntax = "proto3";
+
+package kyve.query.v1beta1;
+
+import "kyve/pool/v1beta1/pool.proto";
+
+option go_package = "github.com/KYVENetwork/chain/x/query/types";
+
+/*
+  This query module serves all queries for the following (KYVE-)modules:
+  bundles, pool, delegation, stakers
+
+  As a lot of requests require the composition of data from multiple modules,
+  everything will be served from one single query module.
+
+  The named packages do not implement their own queries (except for params).
+
+  For simplicity, all queries and their objects live in the corresponding
+  proto files.
+
+  Messages that are used across multiple queries are defined here.
+*/
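To make the composition idea concrete, here is a hedged sketch of fetching pool overviews from this query module over gRPC; the endpoint, the generated client constructor name (NewQueryPoolClient, following the standard gogo/grpc codegen for the QueryPool service) and the connection settings are assumptions:

package main

import (
	"context"
	"fmt"

	querytypes "github.com/KYVENetwork/chain/x/query/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumes a node exposing gRPC on the default port 9090.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := querytypes.NewQueryPoolClient(conn)
	res, err := client.Pools(context.Background(), &querytypes.QueryPoolsRequest{})
	if err != nil {
		panic(err)
	}
	// Each PoolResponse composes pool, bundle-proposal and staker data in one object.
	for _, p := range res.Pools {
		fmt.Println(p.Id, p.Status)
	}
}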
+
+// BasicPool contains the necessary properties needed for a pool
+// to be displayed in the UI
+message BasicPool {
+  // id is the ID of the pool
+  uint64 id = 1;
+
+  // name of the pool
+  string name = 2;
+
+  // runtime for the protocol nodes
+  // like evm, bitcoin, etc.
+  string runtime = 3;
+
+  // logo of the pool
+  string logo = 4;
+
+  // operating_cost is the base payout for each bundle reward
+  uint64 operating_cost = 5;
+
+  // upload_interval is the interval bundles get created
+  uint64 upload_interval = 6;
+
+  // total_funds of the pool. If the pool runs
+  // out of funds no more bundles will be produced
+  uint64 total_funds = 7;
+
+  // total_delegation of the pool
+  uint64 total_delegation = 8;
+
+  // status of the pool, i.e. whether the pool is able
+  // to produce bundles, etc.
+  kyve.pool.v1beta1.PoolStatus status = 9;
+}
+
+// FullStaker aggregates information from the staker and its delegators
+// as well as pending queue entries into one object.
+// It contains almost all needed information for convenient usage
+message FullStaker {
+  // address of the staker
+  string address = 1;
+
+  // metadata as logo, moniker, etc.
+  StakerMetadata metadata = 2;
+
+  // amount the staker has delegated to himself
+  uint64 self_delegation = 3;
+
+  // unbonding_amount is the amount the staker is currently unbonding
+  // from the self-delegation.
+  // This amount can be larger than `amount` when the staker
+  // got slashed during unbonding. However, at the end of
+  // the unbonding period this amount is double checked with the
+  // remaining amount.
+  uint64 self_delegation_unbonding = 4;
+
+  // total_delegation returns the sum of all $KYVE that users
+  // have delegated to this staker
+  uint64 total_delegation = 5;
+
+  // delegator_count is the total number of individual
+  // delegator addresses for that user.
+  uint64 delegator_count = 6;
+
+  // pools is a list of all pools the staker is currently
+  // participating in, i.e. allowed to vote and upload data.
+  repeated PoolMembership pools = 7;
+}
+
+// StakerMetadata contains static information for a staker
+message StakerMetadata {
+  // commission is the percentage of the rewards that will
+  // get transferred to the staker before the remaining
+  // rewards are split across all delegators
+  string commission = 1;
+
+  // moniker is a human-readable name for displaying
+  // the staker in the UI
+  string moniker = 2;
+
+  // website is a https-link to the website of the staker
+  string website = 3;
+
+  // logo is a link to an image file (like jpg or png)
+  string logo = 4;
+
+  // pending_commission_change shows if the staker plans
+  // to change its commission. Delegators will see a warning in
+  // the UI. A commission change takes some time until
+  // the commission is applied. Users have time to redelegate
+  // if they do not agree with the new commission.
+  CommissionChangeEntry pending_commission_change = 5;
+}
+
+// CommissionChangeEntry shows when the old commission
+// of a staker will change to the new commission
+message CommissionChangeEntry {
+  // commission is the new commission that will
+  // become active once the change-time is over
+  string commission = 1;
+
+  // creation_date is the UNIX-timestamp (in seconds)
+  // of when the entry was created.
+  int64 creation_date = 2;
+}
+
+// PoolMembership shows in which pool the staker
+// is participating
+message PoolMembership {
+  // pool contains useful information about the pool
+  BasicPool pool = 1;
+
+  // points indicates if the staker is inactive.
+  // If the staker misses a vote, a point is added.
+  // After 5 points the staker is removed from
+  // the stakers set.
+  uint64 points = 2;
+
+  // is_leaving indicates if a user has scheduled a
+  // PoolLeave entry. After the leave-time is over
+  // the staker will no longer participate in that pool
+  bool is_leaving = 3;
+
+  // valaddress is the address which is authorized to vote
+  // and submit bundles. If the server gets compromised
+  // the staker can just change the valaddress.
+  string valaddress = 4;
+
+  // balance is the valaddress account balance and indicates
+  // whether or not the valaccount needs additional funds to
+  // pay for gas fees
+  uint64 balance = 5;
+}
diff --git a/proto/kyve/query/v1beta1/stakers.proto b/proto/kyve/query/v1beta1/stakers.proto
new file mode 100644
index 00000000..7bcfd3d5
--- /dev/null
+++ b/proto/kyve/query/v1beta1/stakers.proto
@@ -0,0 +1,129 @@
+syntax = "proto3";
+
+package kyve.query.v1beta1;
+
+import "cosmos/base/query/v1beta1/pagination.proto";
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "kyve/query/v1beta1/query.proto";
+import "kyve/stakers/v1beta1/stakers.proto";
+
+option go_package = "github.com/KYVENetwork/chain/x/query/types";
+
+// QueryStakers ...
+service QueryStakers {
+  // Stakers queries for all stakers.
+  rpc Stakers(QueryStakersRequest) returns (QueryStakersResponse) {
+    option (google.api.http).get = "/kyve/query/v1beta1/stakers";
+  }
+
+  // Staker queries a single staker by address.
+ rpc Staker(QueryStakerRequest) returns (QueryStakerResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/staker/{address}"; + } + + // StakersByPool queries for all stakers that are currently participating in the given pool + rpc StakersByPool(QueryStakersByPoolRequest) returns (QueryStakersByPoolResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/stakers_by_pool/{pool_id}"; + } + + // StakersByPool queries for all stakers and sorted them first by number of pools participating and + // then by delegation + rpc StakersByPoolCount(QueryStakersByPoolCountRequest) returns (QueryStakersByPoolCountResponse) { + option (google.api.http).get = "/kyve/query/v1beta1/stakers_by_pool_count"; + } +} + +// ======= +// stakers +// ======= + +// QueryStakersRequest is the request type for the Query/Stakers RPC method. +message QueryStakersRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; + + // status looks whether a staker is participating in pools or not + StakerStatus status = 2; + + // search searches for moniker OR address + string search = 3; +} + +// StakerStatus ... +enum StakerStatus { + option (gogoproto.goproto_enum_prefix) = false; + + // STAKER_STATUS_UNSPECIFIED ... + STAKER_STATUS_UNSPECIFIED = 0; + // STAKER_STATUS_ACTIVE ... + STAKER_STATUS_ACTIVE = 1; + // STAKER_STATUS_INACTIVE ... + STAKER_STATUS_INACTIVE = 2; +} + +// QueryStakersResponse is the response type for the Query/Stakers RPC method. +message QueryStakersResponse { + // stakers ... + repeated FullStaker stakers = 1 [(gogoproto.nullable) = false]; + // pagination defines the pagination in the response. + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// ================ +// staker/{address} +// ================ + +// QueryStakerRequest is the request type for the Query/Staker RPC method. +message QueryStakerRequest { + // address ... + string address = 1; +} + +// QueryStakerResponse is the response type for the Query/Staker RPC method. +message QueryStakerResponse { + // staker ... + FullStaker staker = 1 [(gogoproto.nullable) = false]; +} + +// ========================= +// stakers_by_pool/{pool_id} +// ========================= + +// QueryStakersByPoolRequest is the request type for the Query/Staker RPC method. +message QueryStakersByPoolRequest { + // pool_id ... + uint64 pool_id = 1; +} + +// QueryStakersByPoolResponse is the response type for the Query/Staker RPC method. +message QueryStakersByPoolResponse { + // stakers ... + repeated StakerPoolResponse stakers = 1 [(gogoproto.nullable) = false]; +} + +// StakerPoolResponse ... +message StakerPoolResponse { + // staker ... + FullStaker staker = 1; + // valaccount ... + kyve.stakers.v1beta1.Valaccount valaccount = 2; +} + +// ========================= +// stakers_by_pool/{pool_id} +// ========================= + +// QueryStakersByPoolCountRequest ... +message QueryStakersByPoolCountRequest { + // pagination defines an optional pagination for the request. + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryStakersByPoolCountResponse ... +message QueryStakersByPoolCountResponse { + // stakers ... + repeated FullStaker stakers = 1 [(gogoproto.nullable) = false]; + // pagination defines the pagination in the response. 
+ cosmos.base.query.v1beta1.PageResponse pagination = 2; +} diff --git a/proto/kyve/stakers/v1beta1/events.proto b/proto/kyve/stakers/v1beta1/events.proto new file mode 100644 index 00000000..3b7fba3d --- /dev/null +++ b/proto/kyve/stakers/v1beta1/events.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package kyve.stakers.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/stakers/types"; + +// EventCreateStaker is an event emitted when a protocol node stakes in a pool. +// emitted_by: MsgCreateStaker +message EventCreateStaker { + // staker is the account address of the protocol node. + string staker = 1; + // amount ... + uint64 amount = 2; +} + +// EventUpdateMetadata is an event emitted when a protocol node updates their metadata. +// emitted_by: MsgUpdateMetadata +message EventUpdateMetadata { + // staker is the account address of the protocol node. + string staker = 1; + // moniker ... + string moniker = 2; + // website ... + string website = 3; + // logo ... + string logo = 4; +} + +// EventUpdateCommission ... +// emitted_by: EndBlock +message EventUpdateCommission { + // staker is the account address of the protocol node. + string staker = 1; + // commission ... + string commission = 2; +} + +// EventJoinPool ... +// emitted_by: MsgJoinPool +message EventJoinPool { + // pool_id is the pool the staker joined + uint64 pool_id = 1; + // staker is the address of the staker + string staker = 2; + // valaddress is the address of the protocol node which + // votes in favor of the staker + string valaddress = 3; + // amount is the amount of funds transferred to the valaddress + uint64 amount = 4; +} + +// EventLeavePool ... +// emitted_by: EndBlock +message EventLeavePool { + // pool_id ... + uint64 pool_id = 1; + // staker ... + string staker = 2; +} diff --git a/proto/kyve/stakers/v1beta1/genesis.proto b/proto/kyve/stakers/v1beta1/genesis.proto new file mode 100644 index 00000000..7c85c711 --- /dev/null +++ b/proto/kyve/stakers/v1beta1/genesis.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package kyve.stakers.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/stakers/v1beta1/params.proto"; +import "kyve/stakers/v1beta1/stakers.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/stakers/types"; + +// GenesisState defines the stakers module's genesis state. +message GenesisState { + // params defines all the parameters of the module. + Params params = 1 [(gogoproto.nullable) = false]; + // staker_list ... + repeated Staker staker_list = 2 [(gogoproto.nullable) = false]; + // valaccount_list ... + repeated Valaccount valaccount_list = 3 [(gogoproto.nullable) = false]; + // commission_change_entries ... + repeated CommissionChangeEntry commission_change_entries = 4 [(gogoproto.nullable) = false]; + // queue_state_commission ... + QueueState queue_state_commission = 5 [(gogoproto.nullable) = false]; + // leave_pool_entries ... + repeated LeavePoolEntry leave_pool_entries = 6 [(gogoproto.nullable) = false]; + // queue_state_leave ... + QueueState queue_state_leave = 7 [(gogoproto.nullable) = false]; +} diff --git a/proto/kyve/stakers/v1beta1/params.proto b/proto/kyve/stakers/v1beta1/params.proto new file mode 100644 index 00000000..551c9e54 --- /dev/null +++ b/proto/kyve/stakers/v1beta1/params.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package kyve.stakers.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/stakers/types"; + +// Params defines the stakers module parameters. +message Params { + // commission_change_time ... 
+  uint64 commission_change_time = 1;
+  // leave_pool_time ...
+  uint64 leave_pool_time = 2;
+}
diff --git a/proto/kyve/stakers/v1beta1/query.proto b/proto/kyve/stakers/v1beta1/query.proto
new file mode 100644
index 00000000..ed91f8e7
--- /dev/null
+++ b/proto/kyve/stakers/v1beta1/query.proto
@@ -0,0 +1,26 @@
+syntax = "proto3";
+
+package kyve.stakers.v1beta1;
+
+import "gogoproto/gogo.proto";
+import "google/api/annotations.proto";
+import "kyve/stakers/v1beta1/params.proto";
+
+option go_package = "github.com/KYVENetwork/chain/x/stakers/types";
+
+// Query defines the gRPC querier service.
+service Query {
+  // Parameters queries the parameters of the module.
+  rpc Params(QueryParamsRequest) returns (QueryParamsResponse) {
+    option (google.api.http).get = "/kyve/stakers/v1beta1/params";
+  }
+}
+
+// QueryParamsRequest is request type for the Query/Params RPC method.
+message QueryParamsRequest {}
+
+// QueryParamsResponse is response type for the Query/Params RPC method.
+message QueryParamsResponse {
+  // params holds all the parameters of this module.
+  Params params = 1 [(gogoproto.nullable) = false];
+}
diff --git a/proto/kyve/stakers/v1beta1/stakers.proto b/proto/kyve/stakers/v1beta1/stakers.proto
new file mode 100644
index 00000000..66ac6513
--- /dev/null
+++ b/proto/kyve/stakers/v1beta1/stakers.proto
@@ -0,0 +1,86 @@
+syntax = "proto3";
+
+package kyve.stakers.v1beta1;
+
+option go_package = "github.com/KYVENetwork/chain/x/stakers/types";
+
+// Staker contains all metadata for a staker
+// Every address can only create one staker (itself)
+message Staker {
+  // address ...
+  string address = 1;
+  // commission ...
+  string commission = 2;
+  // moniker ...
+  string moniker = 3;
+  // website ...
+  string website = 4;
+  // logo ...
+  string logo = 5;
+}
+
+// Valaccount gets authorized by a staker to
+// vote in a given pool in favor of the staker.
+message Valaccount {
+  // pool_id defines the pool in which the address
+  // is allowed to vote.
+  uint64 pool_id = 1;
+  // staker is the address the valaccount is voting for.
+  string staker = 2;
+  // valaddress is the account stored on the protocol
+  // node which votes for the staker in the given pool
+  string valaddress = 3;
+  // When a node is inactive (does not vote at all)
+  // a point is added. After a certain amount of points
+  // is reached the node gets kicked out.
+  uint64 points = 4;
+  // is_leaving indicates if a staker is leaving the given pool.
+  bool is_leaving = 5;
+}
+
+// CommissionChangeEntry stores the information for an
+// upcoming commission change. A commission change is never
+// instant, so delegators have time to redelegate in case
+// they don't agree with the new commission.
+message CommissionChangeEntry {
+  // index is needed for the queue-algorithm which
+  // processes the commission changes
+  uint64 index = 1;
+  // staker is the address of the affected staker
+  string staker = 2;
+  // commission is the new commission which will
+  // be applied after the waiting time is over.
+  string commission = 3;
+  // creation_date is the UNIX-timestamp in seconds
+  // when the entry was created.
+  int64 creation_date = 4;
+}
+
+// LeavePoolEntry stores the information for an upcoming
+// pool leave. A staker can't leave a pool instantly.
+// Instead, the `LeaveTime` needs to be awaited.
+// If a staker starts to leave a pool, it will be shown
+// in the UI to the delegators.
+message LeavePoolEntry {
+  // index is needed for the queue-algorithm which
+  // processes the pool-leave entries
+  uint64 index = 1;
+  // staker is the address of the affected staker
+  string staker = 2;
+  // pool_id indicates the pool the staker wants to leave
+  uint64 pool_id = 3;
+  // creation_date is the UNIX-timestamp in seconds
+  // when the entry was created.
+  int64 creation_date = 4;
+}
+
+// QueueState stores the state of the commission-change and pool-leave queues.
+message QueueState {
+  // low_index is the tail of the queue. It is the
+  // oldest entry in the queue. If this entry isn't
+  // due, none of the other entries are.
+  uint64 low_index = 1;
+  // high_index is the head of the queue. New entries
+  // are added to the top.
+  uint64 high_index = 2;
+}
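The queue semantics described by QueueState can be sketched as follows; this is an illustration only, and the inclusive low_index/high_index iteration, the map storage and the leave_pool_time handling are assumptions rather than the keeper's actual implementation:

package main

import "fmt"

// Simplified mirrors of the proto messages above, for illustration only.
type LeavePoolEntry struct {
	Index        uint64
	Staker       string
	PoolID       uint64
	CreationDate int64
}

type QueueState struct {
	LowIndex  uint64
	HighIndex uint64
}

// processLeaveQueue pops entries from the tail of the queue for as long as
// their waiting time (leavePoolTime seconds) has elapsed. Because entries are
// appended in creation order, the first entry that is not yet due stops the
// scan -- exactly the property the low_index comment relies on.
func processLeaveQueue(q *QueueState, entries map[uint64]LeavePoolEntry, leavePoolTime uint64, now int64) []LeavePoolEntry {
	var due []LeavePoolEntry
	for q.LowIndex <= q.HighIndex {
		entry, ok := entries[q.LowIndex]
		if !ok {
			q.LowIndex++
			continue
		}
		if entry.CreationDate+int64(leavePoolTime) > now {
			break // the oldest entry is not due yet, so no later entry can be due
		}
		due = append(due, entry)
		delete(entries, q.LowIndex)
		q.LowIndex++
	}
	return due
}

func main() {
	entries := map[uint64]LeavePoolEntry{
		1: {Index: 1, Staker: "staker-a", PoolID: 0, CreationDate: 100},
		2: {Index: 2, Staker: "staker-b", PoolID: 1, CreationDate: 500},
	}
	queue := &QueueState{LowIndex: 1, HighIndex: 2}
	// With leave_pool_time = 300 and now = 450, only the first entry is due.
	fmt.Println(processLeaveQueue(queue, entries, 300, 450))
}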
diff --git a/proto/kyve/stakers/v1beta1/tx.proto b/proto/kyve/stakers/v1beta1/tx.proto
new file mode 100644
index 00000000..729f36ae
--- /dev/null
+++ b/proto/kyve/stakers/v1beta1/tx.proto
@@ -0,0 +1,100 @@
+syntax = "proto3";
+
+package kyve.stakers.v1beta1;
+
+import "cosmos_proto/cosmos.proto";
+
+option go_package = "github.com/KYVENetwork/chain/x/stakers/types";
+
+// Msg defines the Msg service.
+service Msg {
+  // CreateStaker ...
+  rpc CreateStaker(MsgCreateStaker) returns (MsgCreateStakerResponse);
+  // UpdateMetadata ...
+  rpc UpdateMetadata(MsgUpdateMetadata) returns (MsgUpdateMetadataResponse);
+  // UpdateCommission ...
+  rpc UpdateCommission(MsgUpdateCommission) returns (MsgUpdateCommissionResponse);
+  // JoinPool ...
+  rpc JoinPool(MsgJoinPool) returns (MsgJoinPoolResponse);
+  // LeavePool ...
+  rpc LeavePool(MsgLeavePool) returns (MsgLeavePoolResponse);
+
+  // UpdateParams defines a governance operation for updating the x/stakers module
+  // parameters. The authority is hard-coded to the x/gov module account.
+  rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse);
+}
+
+// MsgCreateStaker defines a SDK message for creating a staker.
+message MsgCreateStaker {
+  // creator ...
+  string creator = 1;
+  // amount ...
+  uint64 amount = 2;
+}
+
+// MsgCreateStakerResponse defines the Msg/CreateStaker response type.
+message MsgCreateStakerResponse {}
+
+// MsgUpdateMetadata defines a SDK message for updating the staker metadata.
+message MsgUpdateMetadata {
+  // creator ...
+  string creator = 1;
+  // moniker ...
+  string moniker = 2;
+  // website ...
+  string website = 3;
+  // logo ...
+  string logo = 4;
+}
+
+// MsgUpdateMetadataResponse defines the Msg/UpdateMetadata response type.
+message MsgUpdateMetadataResponse {}
+
+// MsgUpdateCommission ...
+message MsgUpdateCommission {
+  // creator ...
+  string creator = 1;
+  // commission ...
+  string commission = 2;
+}
+
+// MsgUpdateCommissionResponse ...
+message MsgUpdateCommissionResponse {}
+
+// MsgJoinPool ...
+message MsgJoinPool {
+  // creator ...
+  string creator = 1;
+  // pool_id ...
+  uint64 pool_id = 2;
+  // valaddress ...
+  string valaddress = 3;
+  // amount ...
+  uint64 amount = 4;
+}
+
+// MsgJoinPoolResponse ...
+message MsgJoinPoolResponse {}
+
+// MsgLeavePool ...
+message MsgLeavePool {
+  // creator ...
+  string creator = 1;
+  // pool_id ...
+  uint64 pool_id = 2;
+}
+
+// MsgLeavePoolResponse ...
+message MsgLeavePoolResponse {}
+
+// MsgUpdateParams defines a SDK message for updating the module parameters.
+message MsgUpdateParams {
+  // authority is the address of the governance account.
+  string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"];
+
+  // payload defines the x/stakers parameters to update.
+ string payload = 2; +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +message MsgUpdateParamsResponse {} diff --git a/proto/kyve/team/v1beta1/events.proto b/proto/kyve/team/v1beta1/events.proto new file mode 100644 index 00000000..791c848c --- /dev/null +++ b/proto/kyve/team/v1beta1/events.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package kyve.team.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/team/types"; + +// MsgCreateTeamVestingAccount is an event emitted when a new team vesting account gets created. +// emitted_by: MsgCreateTeamVestingAccount +message EventCreateTeamVestingAccount { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // total_allocation is the number of tokens reserved for this team member. + uint64 total_allocation = 2; + // commencement is the unix timestamp of the member's official start date. + uint64 commencement = 3; +} + +// EventClawback is an event emitted when the authority claws back tokens from a team vesting account. +// emitted_by: MsgClawback +message EventClawback { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // clawback is a unix timestamp of a clawback. If timestamp is zero + // it means that the account has not received a clawback + uint64 clawback = 2; + // amount which got clawed back. + uint64 amount = 3; +} + +// EventClaimedUnlocked is an event emitted when the authority claims unlocked $KYVE for a recipient. +// emitted_by: MsgClaimUnlocked +message EventClaimedUnlocked { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // amount is the number of tokens claimed from the unlocked amount. + uint64 amount = 2; + // recipient is the receiver address of the claim. + string recipient = 3; +} + +// EventClaimInflationRewards is an event emitted when the authority claims inflation rewards for a recipient. +// emitted_by: MsgClaimInflationRewards +message EventClaimInflationRewards { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // amount is the amount of inflation rewards the authority should claim for the account holder + uint64 amount = 2; + // recipient is the receiver address of the claim. + string recipient = 3; +} + +// EventClaimAuthorityRewards is an event emitted when the authority claims its inflation rewards for a recipient. +// emitted_by: MsgClaimAuthorityRewards +message EventClaimAuthorityRewards { + // amount is the amount of inflation rewards the authority should claim for the account holder + uint64 amount = 1; + // recipient is the receiver address of the claim. + string recipient = 2; +} diff --git a/proto/kyve/team/v1beta1/genesis.proto b/proto/kyve/team/v1beta1/genesis.proto new file mode 100644 index 00000000..c6063605 --- /dev/null +++ b/proto/kyve/team/v1beta1/genesis.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package kyve.team.v1beta1; + +import "gogoproto/gogo.proto"; +import "kyve/team/v1beta1/team.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/team/types"; + +// GenesisState defines the team module's genesis state. +message GenesisState { + // authority ... + Authority authority = 2 [(gogoproto.nullable) = false]; + // account_list ... + repeated TeamVestingAccount account_list = 3 [(gogoproto.nullable) = false]; + // account_count ... 
+ uint64 account_count = 4; +} diff --git a/proto/kyve/team/v1beta1/query.proto b/proto/kyve/team/v1beta1/query.proto new file mode 100644 index 00000000..9cf6152d --- /dev/null +++ b/proto/kyve/team/v1beta1/query.proto @@ -0,0 +1,191 @@ +syntax = "proto3"; + +package kyve.team.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "kyve/team/v1beta1/team.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/team/types"; + +// Query defines the gRPC querier service. +service Query { + // TeamInfo queries all important information from the team module + rpc TeamInfo(QueryTeamInfoRequest) returns (QueryTeamInfoResponse) { + option (google.api.http).get = "/kyve/team/v1beta1/team_info"; + } + + // TeamVestingAccounts queries all team vesting accounts of the module. + rpc TeamVestingAccounts(QueryTeamVestingAccountsRequest) returns (QueryTeamVestingAccountsResponse) { + option (google.api.http).get = "/kyve/team/v1beta1/team_vesting_accounts"; + } + + // TeamVestingAccount queries the team vesting accounts of the module. + rpc TeamVestingAccount(QueryTeamVestingAccountRequest) returns (QueryTeamVestingAccountResponse) { + option (google.api.http).get = "/kyve/team/v1beta1/team_vesting_account/{id}"; + } + + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + rpc TeamVestingStatus(QueryTeamVestingStatusRequest) returns (QueryTeamVestingStatusResponse) { + option (google.api.http).get = "/kyve/team/v1beta1/team_vesting_status/{id}"; + } + + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + rpc TeamVestingStatusByTime(QueryTeamVestingStatusByTimeRequest) returns (QueryTeamVestingStatusByTimeResponse) { + option (google.api.http).get = "/kyve/team/v1beta1/team_vesting_status_by_time/{id}/{time}"; + } +} + +// ====== +// /team_info +// ====== + +// QueryAccountsRequest is request type for the Query/TeamInfo RPC method. +message QueryTeamInfoRequest {} + +// QueryAccountsResponse is response type for the Query/TeamInfo RPC method. +message QueryTeamInfoResponse { + // authority is the authorities address + string authority = 1; + // total_team_allocation is the total allocation in $KYVE the team module has in order to reward team members + uint64 total_team_allocation = 2; + // issued_team_allocation is the amount in $KYVE tied to team vesting accounts and which are not available anymore + uint64 issued_team_allocation = 3; + // available_team_allocation is the amount in $KYVE with which further team vesting accounts can be created. + // if the available amount is zero no new vesting accounts can be created + uint64 available_team_allocation = 4; + + // total_authority_rewards is the amount in $KYVE the authority has earned in total with inflation rewards. 
+ // Those rewards can be payed out for different purposes + uint64 total_authority_rewards = 5; + // claimed_authority_rewards is the amount in $KYVE of how much the authority already claimed + uint64 claimed_authority_rewards = 6; + // available_authority_rewards is the amount in $KYVE of how much rewards the authority can claim right now + uint64 available_authority_rewards = 7; + + // total_account_rewards is the amount in $KYVE all team vesting accounts have ever received + uint64 total_account_rewards = 8; + // claimed_account_rewards is the amount in $KYVE all team vesting accounts have ever claimed + uint64 claimed_account_rewards = 9; + // available_account_rewards is the total amount of $KYVE all team vesting accounts can currently claim + uint64 available_account_rewards = 10; + + // required_module_balance is the balance the team module should have. If this is less than the module balance + // something went wrong + uint64 required_module_balance = 11; + // team_module_balance is the team module balance in $KYVE + uint64 team_module_balance = 12; +} + +// ====== +// /team_vesting_accounts +// ====== + +// QueryAccountsRequest is request type for the Query/TeamVestingAccounts RPC method. +message QueryTeamVestingAccountsRequest {} + +// QueryAccountsResponse is response type for the Query/TeamVestingAccounts RPC method. +message QueryTeamVestingAccountsResponse { + // accounts holds all the team vesting accounts of this module. + repeated kyve.team.v1beta1.TeamVestingAccount accounts = 1 [(gogoproto.nullable) = false]; +} + +// ========= +// team_vesting_account/{id} +// ========= + +// QueryTeamVestingAccountRequest is request type for the Query/TeamVestingAccount RPC method. +message QueryTeamVestingAccountRequest { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; +} + +// QueryTeamVestingAccountResponse is the response type for the Query/TeamVestingAccount RPC method. +message QueryTeamVestingAccountResponse { + // account holds the requested team vesting account + kyve.team.v1beta1.TeamVestingAccount account = 1 [(gogoproto.nullable) = false]; +} + +// ========= +// team_vesting_status/{id} +// ========= + +// QueryTeamCurrentVestingStatusRequest is request type for the Query/TeamCurrentVestingStatus RPC method. +message QueryTeamVestingStatusRequest { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; +} + +// QueryTeamCurrentVestingStatusResponse is the response type for the Query/TeamCurrentVestingStatus RPC method. +message QueryTeamVestingStatusResponse { + // request_date .. + string request_date = 1; + // plan ... + QueryVestingPlan plan = 2; + // status .. + QueryVestingStatus status = 3; +} + +// ========= +// team_vesting_status_by_time/{id}/{time} +// ========= + +// QueryTeamVestingStatusByTimeRequest is request type for the Query/TeamCurrentVestingByTimeStatus RPC method. +message QueryTeamVestingStatusByTimeRequest { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // time is a unix timestamp of the time the vesting progress should be calculated + uint64 time = 2; +} + +// QueryTeamVestingStatusByTimeResponse is the response type for the Query/TeamCurrentVestingByTimeStatus RPC method. +message QueryTeamVestingStatusByTimeResponse { + // request_date .. + string request_date = 1; + // plan ... + QueryVestingPlan plan = 2; + // status .. 
+ QueryVestingStatus status = 3; +} + +// QueryVestingStatus is a type holding information about the account's vesting progress +message QueryVestingStatus { + // total_vested_amount ... + uint64 total_vested_amount = 1; + // total_unlocked_amount ... + uint64 total_unlocked_amount = 2; + // current_claimable_amount ... + uint64 current_claimable_amount = 3; + // locked_vested_amount ... + uint64 locked_vested_amount = 4; + // remaining_unvested_amount ... + uint64 remaining_unvested_amount = 5; + // claimed_amount ... + uint64 claimed_amount = 6; + // total_rewards ... + uint64 total_rewards = 7; + // claimed_rewards ... + uint64 claimed_rewards = 8; + // available_rewards ... + uint64 available_rewards = 9; +} + +// QueryVestingPlan is a type holding information about the account's vesting data which does not change +message QueryVestingPlan { + // commencement ... + string commencement = 1; + // token_vesting_start ... + string token_vesting_start = 2; + // token_vesting_finished ... + string token_vesting_finished = 3; + // token_unlock_start ... + string token_unlock_start = 4; + // token_unlock_finished ... + string token_unlock_finished = 5; + // clawback ... + uint64 clawback = 6; + // clawback_amount ... + uint64 clawback_amount = 7; + // maximum_vesting_amount ... + uint64 maximum_vesting_amount = 8; +} diff --git a/proto/kyve/team/v1beta1/team.proto b/proto/kyve/team/v1beta1/team.proto new file mode 100644 index 00000000..df8ab1a0 --- /dev/null +++ b/proto/kyve/team/v1beta1/team.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package kyve.team.v1beta1; + +option go_package = "github.com/KYVENetwork/chain/x/team/types"; + +// Authority ... +message Authority { + // total inflation rewards is the total amount of rewards the authority has received ever + uint64 total_rewards = 1; + // claimed is the amount of inflation rewards claimed by the authority + uint64 rewards_claimed = 2; +} + +// TeamVestingAccount ... +message TeamVestingAccount { + // id is a unique identify for each vesting account, tied to a single team member. + uint64 id = 1; + // total_allocation is the number of tokens reserved for this team member. + uint64 total_allocation = 2; + // commencement is the unix timestamp of the member's official start date in seconds + uint64 commencement = 3; + // clawback is a unix timestamp of a clawback in seconds. If timestamp is zero + // it means that the account has not received a clawback + uint64 clawback = 4; + // unlocked_claimed is the amount of $KYVE already claimed by the account holder + uint64 unlocked_claimed = 5; + // the last time the unlocked amount was claimed + uint64 last_claimed_time = 6; + // total rewards is the total amount of rewards the account has received ever + uint64 total_rewards = 7; + // rewards claimed is the amount inflation rewards claimed by account holder + uint64 rewards_claimed = 8; +} diff --git a/proto/kyve/team/v1beta1/tx.proto b/proto/kyve/team/v1beta1/tx.proto new file mode 100644 index 00000000..392de6c7 --- /dev/null +++ b/proto/kyve/team/v1beta1/tx.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; + +package kyve.team.v1beta1; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "github.com/KYVENetwork/chain/x/team/types"; + +// Msg defines the Msg service. +service Msg { + // ClaimUnlocked ... + rpc ClaimUnlocked(MsgClaimUnlocked) returns (MsgClaimUnlockedResponse); + // Clawback ... + rpc Clawback(MsgClawback) returns (MsgClawbackResponse); + // CreateTeamVestingAccount ... 
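As a purely illustrative sketch of how a consumer might approximate QueryVestingStatus.total_unlocked_amount from QueryVestingPlan's unlock window, assuming (only for this example) a linear unlock between token_unlock_start and token_unlock_finished:

package main

import (
	"fmt"
	"time"
)

// unlockedAt linearly interpolates the unlocked amount of a team vesting
// account between unlockStart and unlockEnd. The linear schedule is an
// assumption made for this sketch; the real curve is defined by the team
// module's keeper logic, not by the proto definitions above.
func unlockedAt(totalAllocation uint64, unlockStart, unlockEnd, now time.Time) uint64 {
	if !now.After(unlockStart) {
		return 0
	}
	if !now.Before(unlockEnd) {
		return totalAllocation
	}
	elapsed := now.Sub(unlockStart).Seconds()
	window := unlockEnd.Sub(unlockStart).Seconds()
	return uint64(float64(totalAllocation) * elapsed / window)
}

func main() {
	start := time.Date(2023, 3, 14, 0, 0, 0, 0, time.UTC)
	end := start.AddDate(2, 0, 0) // assume a two-year unlock window
	now := start.AddDate(1, 0, 0) // one year in
	fmt.Println(unlockedAt(1_000_000_000, start, end, now)) // roughly half unlocked
}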
+ rpc CreateTeamVestingAccount(MsgCreateTeamVestingAccount) returns (MsgCreateTeamVestingAccountResponse); + // ClaimAuthorityRewards ... + rpc ClaimAuthorityRewards(MsgClaimAuthorityRewards) returns (MsgClaimAuthorityRewardsResponse); + // ClaimInflationRewards ... + rpc ClaimAccountRewards(MsgClaimAccountRewards) returns (MsgClaimAccountRewardsResponse); +} + +// MsgClaimUnlockedTokens ... +message MsgClaimUnlocked { + // authority is the foundation which is allowed to payout unlocked tokens + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id is the unique identifier of the team member + uint64 id = 2; + // amount of $KYVE that will be paid to the recipient and marked as deducted from the unlocked amount. + uint64 amount = 3; + // recipient is the recipient address chosen by the team member. + string recipient = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"]; +} + +// MsgClaimUnlockedResponse defines the Msg/ClaimUnlockedTokens response type. +message MsgClaimUnlockedResponse {} + +// MsgClaimAuthorityRewards ... +message MsgClaimAuthorityRewards { + // authority is the foundation which is allowed to payout unlocked tokens + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // amount of $KYVE that will be paid to the recipient and marked as deducted from the authority inflation rewards + uint64 amount = 2; + // recipient is the recipient address chosen by the team member. + string recipient = 3 [(cosmos_proto.scalar) = "cosmos.AddressString"]; +} + +// MsgClaimAuthorityRewardsResponse defines the Msg/ClaimAuthorityRewards response type. +message MsgClaimAuthorityRewardsResponse {} + +// MsgClaimAccountRewards ... +message MsgClaimAccountRewards { + // authority is the foundation which is allowed to payout unlocked tokens + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id is the unique identifier of the team member + uint64 id = 2; + // amount of $KYVE that will be paid to the recipient and marked as deducted from the inflation rewards + uint64 amount = 3; + // recipient is the recipient address chosen by the team member. + string recipient = 4 [(cosmos_proto.scalar) = "cosmos.AddressString"]; +} + +// MsgClaimAccountRewardsResponse defines the Msg/ClaimAccountRewards response type. +message MsgClaimAccountRewardsResponse {} + +// MsgClawback ... +message MsgClawback { + // authority is the foundation which is allowed to modify team accounts + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // id is the unique identifier of the team member + uint64 id = 2; + // clawback is a unix timestamp (in seconds) of when the clawback should be applied + uint64 clawback = 3; +} + +// MsgClawbackResponse defines the Msg/Clawback response type. +message MsgClawbackResponse {} + +// MsgCreateTeamVestingAccount ... +message MsgCreateTeamVestingAccount { + // authority ... + string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // total_allocation is the number of tokens reserved for this team member. + uint64 total_allocation = 2; + // commencement is the unix timestamp of the member's official start date. + uint64 commencement = 3; +} + +// MsgCreateTeamVestingAccountResponse defines the Msg/CreateTeamVestingAccount response type. 
+message MsgCreateTeamVestingAccountResponse {} diff --git a/testutil/integration/checks.go b/testutil/integration/checks.go new file mode 100644 index 00000000..0af79364 --- /dev/null +++ b/testutil/integration/checks.go @@ -0,0 +1,485 @@ +package integration + +import ( + "github.com/KYVENetwork/chain/x/bundles" + "github.com/KYVENetwork/chain/x/delegation" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + "github.com/KYVENetwork/chain/x/pool" + querytypes "github.com/KYVENetwork/chain/x/query/types" + "github.com/KYVENetwork/chain/x/stakers" + "github.com/KYVENetwork/chain/x/team" + "github.com/cosmos/cosmos-sdk/types/query" + . "github.com/onsi/gomega" + + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func (suite *KeeperTestSuite) PerformValidityChecks() { + // verify pool module + suite.VerifyPoolModuleAssetsIntegrity() + suite.VerifyPoolTotalFunds() + + // TODO(@troy,@max): Figure out a better way to check this when disabling pools. + // suite.VerifyPoolQueries() + + suite.VerifyPoolGenesisImportExport() + + // verify stakers module + suite.VerifyStakersGenesisImportExport() + suite.VerifyStakersModuleAssetsIntegrity() + suite.VerifyPoolTotalStake() + suite.VerifyStakersQueries() + suite.VerifyActiveStakers() + + // verify bundles module + suite.VerifyBundlesQueries() + suite.VerifyBundlesGenesisImportExport() + + // verify delegation module + suite.VerifyDelegationQueries() + suite.VerifyDelegationModuleIntegrity() + suite.VerifyDelegationGenesisImportExport() + + // verify team module + // TODO(@troy): implement team funds integrity checks + suite.VerifyTeamGenesisImportExport() +} + +// ================== +// pool module checks +// ================== + +func (suite *KeeperTestSuite) VerifyPoolModuleAssetsIntegrity() { + expectedBalance := uint64(0) + actualBalance := uint64(0) + + for _, pool := range suite.App().PoolKeeper.GetAllPools(suite.Ctx()) { + for _, funder := range pool.Funders { + expectedBalance += funder.Amount + } + } + + moduleAcc := suite.App().AccountKeeper.GetModuleAccount(suite.Ctx(), pooltypes.ModuleName).GetAddress() + actualBalance = suite.App().BankKeeper.GetBalance(suite.Ctx(), moduleAcc, globalTypes.Denom).Amount.Uint64() + + Expect(actualBalance).To(Equal(expectedBalance)) +} + +func (suite *KeeperTestSuite) VerifyPoolTotalFunds() { + for _, pool := range suite.App().PoolKeeper.GetAllPools(suite.Ctx()) { + expectedBalance := uint64(0) + actualBalance := pool.TotalFunds + + for _, funder := range pool.Funders { + expectedBalance += funder.Amount + } + + Expect(actualBalance).To(Equal(expectedBalance)) + } +} + +func (suite *KeeperTestSuite) VerifyPoolQueries() { + poolsState := suite.App().PoolKeeper.GetAllPools(suite.Ctx()) + + poolsQuery := make([]querytypes.PoolResponse, 0) + + activePoolsQuery, activePoolsQueryErr := suite.App().QueryKeeper.Pools(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryPoolsRequest{}) + disabledPoolsQuery, disabledPoolsQueryErr := suite.App().QueryKeeper.Pools(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryPoolsRequest{ + Disabled: true, + }) + + poolsQuery = append(poolsQuery, activePoolsQuery.Pools...) + poolsQuery = append(poolsQuery, disabledPoolsQuery.Pools...) 
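// Editor's note (not part of the original patch): the element-wise assertions that
// follow assume the concatenation of the enabled and disabled pool query pages comes
// back in the same order as PoolKeeper.GetAllPools. A hedged, order-independent
// variant could first index the responses by pool ID, roughly:
//
//	byId := make(map[uint64]querytypes.PoolResponse, len(poolsQuery))
//	for _, p := range poolsQuery {
//		byId[p.Id] = p
//	}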
+ + Expect(activePoolsQueryErr).To(BeNil()) + Expect(disabledPoolsQueryErr).To(BeNil()) + + Expect(poolsQuery).To(HaveLen(len(poolsState))) + + for i := range poolsState { + bundleProposalState, _ := suite.App().BundlesKeeper.GetBundleProposal(suite.Ctx(), poolsState[i].Id) + stakersState := suite.App().StakersKeeper.GetAllStakerAddressesOfPool(suite.Ctx(), poolsState[i].Id) + totalDelegationState := suite.App().DelegationKeeper.GetDelegationOfPool(suite.Ctx(), poolsState[i].Id) + + Expect(poolsQuery[i].Id).To(Equal(poolsState[i].Id)) + Expect(*poolsQuery[i].Data).To(Equal(poolsState[i])) + Expect(*poolsQuery[i].BundleProposal).To(Equal(bundleProposalState)) + Expect(poolsQuery[i].Stakers).To(Equal(stakersState)) + Expect(poolsQuery[i].TotalDelegation).To(Equal(totalDelegationState)) + + // test pool by id + poolByIdQuery, poolByIdQueryErr := suite.App().QueryKeeper.Pool(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryPoolRequest{ + Id: poolsState[i].Id, + }) + + Expect(poolByIdQueryErr).To(BeNil()) + Expect(poolByIdQuery.Pool.Id).To(Equal(poolsState[i].Id)) + Expect(*poolByIdQuery.Pool.Data).To(Equal(poolsState[i])) + Expect(*poolByIdQuery.Pool.BundleProposal).To(Equal(bundleProposalState)) + Expect(poolByIdQuery.Pool.Stakers).To(Equal(stakersState)) + Expect(poolByIdQuery.Pool.TotalDelegation).To(Equal(totalDelegationState)) + + // test stakers by pool + valaccounts := suite.App().StakersKeeper.GetAllValaccountsOfPool(suite.Ctx(), poolsState[i].Id) + stakersByPoolState := make([]querytypes.StakerPoolResponse, 0) + + for _, valaccount := range valaccounts { + staker, stakerFound := suite.App().StakersKeeper.GetStaker(suite.Ctx(), valaccount.Staker) + + if stakerFound { + stakersByPoolState = append(stakersByPoolState, querytypes.StakerPoolResponse{ + Staker: suite.App().QueryKeeper.GetFullStaker(suite.Ctx(), staker.Address), + Valaccount: valaccount, + }) + } + } + + stakersByPoolQuery, stakersByPoolQueryErr := suite.App().QueryKeeper.StakersByPool(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryStakersByPoolRequest{ + PoolId: poolsState[i].Id, + }) + + Expect(stakersByPoolQueryErr).To(BeNil()) + Expect(stakersByPoolQuery.Stakers).To(HaveLen(len(stakersByPoolState))) + + for s := range stakersByPoolState { + Expect(stakersByPoolQuery.Stakers[s]).To(Equal(stakersByPoolState[s])) + } + } +} + +func (suite *KeeperTestSuite) VerifyPoolGenesisImportExport() { + genState := pool.ExportGenesis(suite.Ctx(), suite.App().PoolKeeper) + + // Delete all entries in Pool Store + store := suite.Ctx().KVStore(suite.App().PoolKeeper.StoreKey()) + suite.deleteStore(store) + + err := genState.Validate() + Expect(err).To(BeNil()) + pool.InitGenesis(suite.Ctx(), suite.App().PoolKeeper, *genState) +} + +// ===================== +// stakers module checks +// ===================== + +func (suite *KeeperTestSuite) VerifyStakersModuleAssetsIntegrity() { + //expectedBalance := uint64(0) + //actualBalance := uint64(0) + // + //for _, staker := range suite.App().StakersKeeper.GetAllStakers(suite.Ctx()) { + // expectedBalance += suite.App().DelegationKeeper + //} + // + //moduleAcc := suite.App().AccountKeeper.GetModuleAccount(suite.Ctx(), stakerstypes.ModuleName).GetAddress() + //actualBalance = suite.App().BankKeeper.GetBalance(suite.Ctx(), moduleAcc, globalTypes.Denom).Amount.Uint64() + // + //Expect(actualBalance).To(Equal(expectedBalance)) +} + +func (suite *KeeperTestSuite) VerifyPoolTotalStake() { + for _, pool := range suite.App().PoolKeeper.GetAllPools(suite.Ctx()) { + expectedBalance := uint64(0) + 
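// Editor's note (descriptive comment, not in the original diff): the loop below
// rebuilds the pool's total stake from scratch by summing GetDelegationAmount over
// every staker address bonded to the pool and then compares that sum with the cached
// aggregate returned by GetDelegationOfPool, so any drift between the per-staker
// entries and the aggregate index fails the suite.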
actualBalance := suite.App().DelegationKeeper.GetDelegationOfPool(suite.Ctx(), pool.Id) + + for _, stakerAddress := range suite.App().StakersKeeper.GetAllStakerAddressesOfPool(suite.Ctx(), pool.Id) { + expectedBalance += suite.App().DelegationKeeper.GetDelegationAmount(suite.Ctx(), stakerAddress) + } + + Expect(actualBalance).To(Equal(expectedBalance)) + } +} + +func (suite *KeeperTestSuite) VerifyActiveStakers() { + totalDelegation := uint64(0) + for _, delegator := range suite.App().DelegationKeeper.GetAllDelegators(suite.Ctx()) { + if len(suite.App().StakersKeeper.GetValaccountsFromStaker(suite.Ctx(), delegator.Staker)) > 0 { + totalDelegation += suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), delegator.Staker, delegator.Delegator) + + validators, _ := suite.App().StakersKeeper.GetDelegations(suite.ctx, delegator.Delegator) + Expect(validators).To(ContainElement(delegator.Staker)) + } + } + Expect(suite.App().StakersKeeper.TotalBondedTokens(suite.Ctx()).Uint64()).To(Equal(totalDelegation)) +} + +func (suite *KeeperTestSuite) VerifyStakersQueries() { + stakersState := suite.App().StakersKeeper.GetAllStakers(suite.Ctx()) + stakersQuery, stakersQueryErr := suite.App().QueryKeeper.Stakers(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryStakersRequest{ + Pagination: &query.PageRequest{ + Limit: 1000, + }, + }) + + stakersMap := make(map[string]querytypes.FullStaker, 0) + for _, staker := range stakersQuery.Stakers { + stakersMap[staker.Address] = staker + } + + Expect(stakersQueryErr).To(BeNil()) + Expect(stakersQuery.Stakers).To(HaveLen(len(stakersState))) + + for i := range stakersState { + address := stakersState[i].Address + suite.verifyFullStaker(stakersMap[address], address) + + stakerByAddressQuery, stakersByAddressQueryErr := suite.App().QueryKeeper.Staker(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryStakerRequest{ + Address: address, + }) + + Expect(stakersByAddressQueryErr).To(BeNil()) + suite.verifyFullStaker(stakerByAddressQuery.Staker, address) + } +} + +func (suite *KeeperTestSuite) VerifyStakersGenesisImportExport() { + genState := stakers.ExportGenesis(suite.Ctx(), suite.App().StakersKeeper) + + // Delete all entries in Stakers Store + store := suite.Ctx().KVStore(suite.App().StakersKeeper.StoreKey()) + iterator := store.Iterator(nil, nil) + keys := make([][]byte, 0) + for ; iterator.Valid(); iterator.Next() { + key := make([]byte, len(iterator.Key())) + copy(key, iterator.Key()) + keys = append(keys, key) + } + iterator.Close() + for _, key := range keys { + store.Delete(key) + } + + err := genState.Validate() + Expect(err).To(BeNil()) + stakers.InitGenesis(suite.Ctx(), suite.App().StakersKeeper, *genState) +} + +// ===================== +// bundles module checks +// ===================== + +func (suite *KeeperTestSuite) VerifyBundlesQueries() { + pools := suite.App().PoolKeeper.GetAllPools(suite.Ctx()) + + for _, pool := range pools { + finalizedBundlesState := suite.App().BundlesKeeper.GetFinalizedBundlesByPool(suite.Ctx(), pool.Id) + finalizedBundlesQuery, finalizedBundlesQueryErr := suite.App().QueryKeeper.FinalizedBundles(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryFinalizedBundlesRequest{ + PoolId: pool.Id, + }) + + Expect(finalizedBundlesQueryErr).To(BeNil()) + Expect(finalizedBundlesQuery.FinalizedBundles).To(HaveLen(len(finalizedBundlesState))) + + for i := range finalizedBundlesState { + Expect(finalizedBundlesQuery.FinalizedBundles[i]).To(Equal(finalizedBundlesState[i])) + + finalizedBundleQuery, finalizedBundleQueryErr := 
suite.App().QueryKeeper.FinalizedBundle(sdk.WrapSDKContext(suite.Ctx()), &querytypes.QueryFinalizedBundleRequest{ + PoolId: pool.Id, + Id: finalizedBundlesState[i].Id, + }) + + Expect(finalizedBundleQueryErr).To(BeNil()) + Expect(finalizedBundleQuery.FinalizedBundle).To(Equal(finalizedBundlesState[i])) + } + } +} + +func (suite *KeeperTestSuite) VerifyBundlesGenesisImportExport() { + genState := bundles.ExportGenesis(suite.Ctx(), suite.App().BundlesKeeper) + err := genState.Validate() + Expect(err).To(BeNil()) + bundles.InitGenesis(suite.Ctx(), suite.App().BundlesKeeper, *genState) +} + +// ======================== +// delegation module checks +// ======================== + +func (suite *KeeperTestSuite) VerifyDelegationQueries() { + goCtx := sdk.WrapSDKContext(suite.Ctx()) + for _, delegator := range suite.App().DelegationKeeper.GetAllDelegators(suite.Ctx()) { + + // Query: delegator/{staker}/{delegator} + resD, errD := suite.App().QueryKeeper.Delegator(goCtx, &querytypes.QueryDelegatorRequest{ + Staker: delegator.Staker, + Delegator: delegator.Delegator, + }) + Expect(errD).To(BeNil()) + Expect(resD.Delegator.Delegator).To(Equal(delegator.Delegator)) + Expect(resD.Delegator.Staker).To(Equal(delegator.Staker)) + Expect(resD.Delegator.DelegationAmount).To(Equal(suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), delegator.Staker, delegator.Delegator))) + Expect(resD.Delegator.CurrentReward).To(Equal(suite.App().DelegationKeeper.GetOutstandingRewards(suite.Ctx(), delegator.Staker, delegator.Delegator))) + + // Query: stakers_by_delegator/{delegator} + resSbD, errSbD := suite.App().QueryKeeper.StakersByDelegator(goCtx, &querytypes.QueryStakersByDelegatorRequest{ + Pagination: nil, + Delegator: delegator.Delegator, + }) + Expect(errSbD).To(BeNil()) + Expect(resSbD.Delegator).To(Equal(delegator.Delegator)) + for _, sRes := range resSbD.Stakers { + Expect(sRes.DelegationAmount).To(Equal(suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), sRes.Staker.Address, delegator.Delegator))) + Expect(sRes.CurrentReward).To(Equal(suite.App().DelegationKeeper.GetOutstandingRewards(suite.Ctx(), sRes.Staker.Address, delegator.Delegator))) + suite.verifyFullStaker(*sRes.Staker, sRes.Staker.Address) + } + } + + stakersDelegators := make(map[string]map[string]delegationtypes.Delegator) + for _, d := range suite.App().DelegationKeeper.GetAllDelegators(suite.Ctx()) { + if stakersDelegators[d.Staker] == nil { + stakersDelegators[d.Staker] = map[string]delegationtypes.Delegator{} + } + stakersDelegators[d.Staker][d.Delegator] = d + } + + for _, staker := range suite.App().StakersKeeper.GetAllStakers(suite.Ctx()) { + // Query: delegators_by_staker/{staker} + resDbS, errDbS := suite.App().QueryKeeper.DelegatorsByStaker(goCtx, &querytypes.QueryDelegatorsByStakerRequest{ + Pagination: nil, + Staker: staker.Address, + }) + Expect(errDbS).To(BeNil()) + + delegationData, _ := suite.App().DelegationKeeper.GetDelegationData(suite.Ctx(), staker.Address) + Expect(resDbS.TotalDelegatorCount).To(Equal(delegationData.DelegatorCount)) + Expect(resDbS.TotalDelegation).To(Equal(suite.App().DelegationKeeper.GetDelegationAmount(suite.Ctx(), staker.Address))) + + for _, delegator := range resDbS.Delegators { + Expect(stakersDelegators[delegator.Staker][delegator.Delegator]).ToNot(BeNil()) + Expect(delegator.DelegationAmount).To(Equal(suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), delegator.Staker, delegator.Delegator))) + 
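// Editor's note (descriptive, not part of the original patch): every delegator
// returned by the DelegatorsByStaker query is cross-checked against the keeper's own
// getters, so the gRPC query path and the store-level accessors must report the same
// delegation amount and outstanding rewards.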
Expect(delegator.CurrentReward).To(Equal(suite.App().DelegationKeeper.GetOutstandingRewards(suite.Ctx(), delegator.Staker, delegator.Delegator))) + } + } +} + +func (suite *KeeperTestSuite) VerifyDelegationModuleIntegrity() { + expectedBalance := uint64(0) + + for _, delegator := range suite.App().DelegationKeeper.GetAllDelegators(suite.Ctx()) { + expectedBalance += suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), delegator.Staker, delegator.Delegator) + expectedBalance += suite.App().DelegationKeeper.GetOutstandingRewards(suite.Ctx(), delegator.Staker, delegator.Delegator) + } + + // Due to rounding errors the delegation module will get a very few nKYVE over the time. + // As long as it is guaranteed that it's always the user who gets paid out less in case of + // rounding, everything is fine. + difference := suite.GetBalanceFromModule(delegationtypes.ModuleName) - expectedBalance + //nolint:all + Expect(difference >= 0).To(BeTrue()) + + // 10 should be enough for testing + Expect(difference <= 10).To(BeTrue()) +} + +func (suite *KeeperTestSuite) VerifyDelegationGenesisImportExport() { + genState := delegation.ExportGenesis(suite.Ctx(), suite.App().DelegationKeeper) + err := genState.Validate() + Expect(err).To(BeNil()) + delegation.InitGenesis(suite.Ctx(), suite.App().DelegationKeeper, *genState) +} + +// ========================= +// team module checks +// ========================= + +func (suite *KeeperTestSuite) VerifyTeamGenesisImportExport() { + genState := team.ExportGenesis(suite.Ctx(), suite.App().TeamKeeper) + + // Delete all entries in Stakers Store + store := suite.Ctx().KVStore(suite.App().TeamKeeper.StoreKey()) + iterator := store.Iterator(nil, nil) + keys := make([][]byte, 0) + for ; iterator.Valid(); iterator.Next() { + key := make([]byte, len(iterator.Key())) + copy(key, iterator.Key()) + keys = append(keys, key) + } + iterator.Close() + for _, key := range keys { + store.Delete(key) + } + + err := genState.Validate() + Expect(err).To(BeNil()) + team.InitGenesis(suite.Ctx(), suite.App().TeamKeeper, *genState) +} + +// ======================== +// helpers +// ======================== + +func (suite *KeeperTestSuite) verifyFullStaker(fullStaker querytypes.FullStaker, stakerAddress string) { + Expect(fullStaker.Address).To(Equal(stakerAddress)) + + staker, found := suite.App().StakersKeeper.GetStaker(suite.Ctx(), stakerAddress) + Expect(found).To(BeTrue()) + Expect(fullStaker.SelfDelegation).To(Equal(suite.App().DelegationKeeper.GetDelegationAmountOfDelegator(suite.Ctx(), stakerAddress, stakerAddress))) + + selfDelegationUnbonding := uint64(0) + for _, entry := range suite.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(suite.Ctx(), fullStaker.Address) { + if entry.Staker == stakerAddress { + selfDelegationUnbonding += entry.Amount + } + } + + Expect(fullStaker.SelfDelegationUnbonding).To(Equal(selfDelegationUnbonding)) + Expect(fullStaker.Metadata.Logo).To(Equal(staker.Logo)) + Expect(fullStaker.Metadata.Website).To(Equal(staker.Website)) + Expect(fullStaker.Metadata.Commission).To(Equal(staker.Commission)) + Expect(fullStaker.Metadata.Moniker).To(Equal(staker.Moniker)) + + pendingCommissionChange, found := suite.App().StakersKeeper.GetCommissionChangeEntryByIndex2(suite.Ctx(), stakerAddress) + if found { + Expect(fullStaker.Metadata.PendingCommissionChange.Commission).To(Equal(pendingCommissionChange.Commission)) + Expect(fullStaker.Metadata.PendingCommissionChange.CreationDate).To(Equal(pendingCommissionChange.CreationDate)) 
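// Editor's note (descriptive, not in the original diff): the full-staker response has
// to mirror any pending commission change entry exactly; when no entry exists, the
// else branch below asserts that the metadata field stays nil.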
+ } else { + Expect(fullStaker.Metadata.PendingCommissionChange).To(BeNil()) + } + + delegationData, _ := suite.App().DelegationKeeper.GetDelegationData(suite.Ctx(), stakerAddress) + Expect(fullStaker.DelegatorCount).To(Equal(delegationData.DelegatorCount)) + + Expect(fullStaker.TotalDelegation).To(Equal(suite.App().DelegationKeeper.GetDelegationAmount(suite.Ctx(), stakerAddress))) + + poolIds := make(map[uint64]bool) + + for _, poolMembership := range fullStaker.Pools { + poolIds[poolMembership.Pool.Id] = true + valaccount, found := suite.App().StakersKeeper.GetValaccount(suite.Ctx(), poolMembership.Pool.Id, stakerAddress) + Expect(found).To(BeTrue()) + + Expect(poolMembership.Valaddress).To(Equal(valaccount.Valaddress)) + Expect(poolMembership.IsLeaving).To(Equal(valaccount.IsLeaving)) + Expect(poolMembership.Points).To(Equal(valaccount.Points)) + + pool, found := suite.App().PoolKeeper.GetPool(suite.Ctx(), valaccount.PoolId) + Expect(found).To(BeTrue()) + Expect(poolMembership.Pool.Id).To(Equal(pool.Id)) + Expect(poolMembership.Pool.Logo).To(Equal(pool.Logo)) + Expect(poolMembership.Pool.TotalFunds).To(Equal(pool.TotalFunds)) + Expect(poolMembership.Pool.Name).To(Equal(pool.Name)) + Expect(poolMembership.Pool.Runtime).To(Equal(pool.Runtime)) + Expect(poolMembership.Pool.Status).To(Equal(suite.App().QueryKeeper.GetPoolStatus(suite.Ctx(), &pool))) + } + + // Reverse check the pool memberships + for _, valaccount := range suite.App().StakersKeeper.GetValaccountsFromStaker(suite.Ctx(), stakerAddress) { + Expect(poolIds[valaccount.PoolId]).To(BeTrue()) + } +} + +func (suite *KeeperTestSuite) deleteStore(store sdk.KVStore) { + iterator := store.Iterator(nil, nil) + keys := make([][]byte, 0) + for ; iterator.Valid(); iterator.Next() { + key := make([]byte, len(iterator.Key())) + copy(key, iterator.Key()) + keys = append(keys, key) + } + iterator.Close() + for _, key := range keys { + store.Delete(key) + } +} diff --git a/testutil/integration/helpers.go b/testutil/integration/helpers.go new file mode 100644 index 00000000..d202237a --- /dev/null +++ b/testutil/integration/helpers.go @@ -0,0 +1,43 @@ +package integration + +import ( + globalTypes "github.com/KYVENetwork/chain/x/global/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func (suite *KeeperTestSuite) GetBalanceFromAddress(address string) uint64 { + accAddress, err := sdk.AccAddressFromBech32(address) + if err != nil { + return 0 + } + + balance := suite.App().BankKeeper.GetBalance(suite.Ctx(), accAddress, globalTypes.Denom) + + return uint64(balance.Amount.Int64()) +} + +func (suite *KeeperTestSuite) GetBalanceFromModule(moduleName string) uint64 { + moduleAcc := suite.App().AccountKeeper.GetModuleAccount(suite.Ctx(), moduleName).GetAddress() + return suite.App().BankKeeper.GetBalance(suite.Ctx(), moduleAcc, globalTypes.Denom).Amount.Uint64() +} + +func (suite *KeeperTestSuite) GetNextUploader() (nextStaker string, nextValaddress string) { + bundleProposal, _ := suite.App().BundlesKeeper.GetBundleProposal(suite.Ctx(), 0) + + switch bundleProposal.NextUploader { + case STAKER_0: + nextStaker = STAKER_0 + nextValaddress = VALADDRESS_0 + case STAKER_1: + nextStaker = STAKER_1 + nextValaddress = VALADDRESS_1 + case STAKER_2: + nextStaker = STAKER_2 + nextValaddress = VALADDRESS_2 + default: + nextStaker = "" + nextValaddress = "" + } + + return +} diff --git a/testutil/integration/integration.go b/testutil/integration/integration.go new file mode 100644 index 00000000..0f924f6b --- /dev/null +++ 
b/testutil/integration/integration.go @@ -0,0 +1,253 @@ +package integration + +import ( + mrand "math/rand" + "time" + + globalTypes "github.com/KYVENetwork/chain/x/global/types" + + "github.com/KYVENetwork/chain/app" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + sdk "github.com/cosmos/cosmos-sdk/types" + mintTypes "github.com/cosmos/cosmos-sdk/x/mint/types" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/suite" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/tmhash" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmversion "github.com/tendermint/tendermint/proto/tendermint/version" + "github.com/tendermint/tendermint/version" +) + +const ( + ALICE = "kyve1jq304cthpx0lwhpqzrdjrcza559ukyy3zsl2vd" + BOB = "kyve1hvg7zsnrj6h29q9ss577mhrxa04rn94h7zjugq" + CHARLIE = "kyve1ay22rr3kz659fupu0tcswlagq4ql6rwm4nuv0s" + + STAKER_0 = "kyve1htgfatqevuvfzvl0sxp97ywteqhg5leha9emf4" + VALADDRESS_0 = "kyve1qnf86dkvvtpdukx30r3vajav7rdq8snktm90hm" + + STAKER_1 = "kyve1gnr35rwn8rmflnlzs6nn5hhkmzzkxg9ap8xepw" + VALADDRESS_1 = "kyve1hpjgzljglmv00nstk3jvcw0zzq94nu0cuxv5ga" + + STAKER_2 = "kyve1xsemlxghgvusumhqzm2ztjw7dz9krvu3de54e2" + VALADDRESS_2 = "kyve1u0870dkae6ql63hxvy9y7g65c0y8csfh8allzl" + + // To avoid giving burner permissions to a module for the tests + BURNER = "kyve1ld23ktfwc9zstaq8aanwkkj8cf0ru6adtz59y5" +) + +var ( + DUMMY []string + VALDUMMY []string +) + +const ( + KYVE = uint64(1_000_000_000) + TKYVE = uint64(1) +) + +var KYVE_DENOM = globalTypes.Denom + +func NewCleanChain() *KeeperTestSuite { + s := KeeperTestSuite{} + s.SetupTest(time.Now().Unix()) + s.initDummyAccounts() + return &s +} + +func NewCleanChainAtTime(startTime int64) *KeeperTestSuite { + s := KeeperTestSuite{} + s.SetupTest(startTime) + s.initDummyAccounts() + return &s +} + +func (suite *KeeperTestSuite) initDummyAccounts() { + _ = suite.Mint(ALICE, 1000*KYVE) + _ = suite.Mint(BOB, 1000*KYVE) + _ = suite.Mint(CHARLIE, 1000*KYVE) + + _ = suite.Mint(STAKER_0, 1000*KYVE) + _ = suite.Mint(VALADDRESS_0, 1000*KYVE) + + _ = suite.Mint(STAKER_1, 1000*KYVE) + _ = suite.Mint(VALADDRESS_1, 1000*KYVE) + + _ = suite.Mint(STAKER_2, 1000*KYVE) + _ = suite.Mint(VALADDRESS_2, 1000*KYVE) + + mrand.Seed(1) + + DUMMY = make([]string, 50) + + for i := 0; i < 50; i++ { + byteAddr := make([]byte, 20) + for k := 0; k < 20; k++ { + mrand.Seed(int64(i + k)) + byteAddr[k] = byte(mrand.Int()) + } + dummy, _ := sdk.Bech32ifyAddressBytes("kyve", byteAddr) + DUMMY[i] = dummy + _ = suite.Mint(dummy, 1000*KYVE) + } + + VALDUMMY = make([]string, 50) + for i := 0; i < 50; i++ { + byteAddr := make([]byte, 20) + for k := 0; k < 20; k++ { + mrand.Seed(int64(i + k + 100)) + byteAddr[k] = byte(mrand.Int()) + } + dummy, _ := sdk.Bech32ifyAddressBytes("kyve", byteAddr) + VALDUMMY[i] = dummy + _ = suite.Mint(dummy, 1000*KYVE) + } +} + +func (suite *KeeperTestSuite) Mint(address string, amount uint64) error { + coins := sdk.NewCoins(sdk.NewInt64Coin(KYVE_DENOM, int64(amount))) + err := suite.app.BankKeeper.MintCoins(suite.ctx, mintTypes.ModuleName, coins) + if err != nil { + return err + } + + suite.Commit() + + sender, err := sdk.AccAddressFromBech32(address) + if err != nil { + return err + } + + err = 
suite.app.BankKeeper.SendCoinsFromModuleToAccount(suite.ctx, mintTypes.ModuleName, sender, coins) + if err != nil { + return err + } + + return nil +} + +type QueryClients struct { + stakersClient stakerstypes.QueryClient +} + +type KeeperTestSuite struct { + suite.Suite + + ctx sdk.Context + + app *app.App + queries QueryClients + address common.Address + consAddress sdk.ConsAddress + validator stakingtypes.Validator + denom string +} + +func (suite *KeeperTestSuite) App() *app.App { + return suite.app +} + +func (suite *KeeperTestSuite) Ctx() sdk.Context { + return suite.ctx +} + +func (suite *KeeperTestSuite) SetCtx(ctx sdk.Context) { + suite.ctx = ctx +} + +func (suite *KeeperTestSuite) SetupTest(startTime int64) { + suite.SetupApp(startTime) +} + +func (suite *KeeperTestSuite) SetupApp(startTime int64) { + suite.app = app.Setup() + + suite.denom = globalTypes.Denom + + suite.address = common.HexToAddress("0xBf71F763e4DEd30139C40160AE74Df881D5C7A2d") + + // consensus key + ePriv := ed25519.GenPrivKeyFromSecret([]byte{1}) + suite.consAddress = sdk.ConsAddress(ePriv.PubKey().Address()) + + suite.ctx = suite.app.BaseApp.NewContext(false, tmproto.Header{ + Height: 1, + ChainID: "kyve-test", + Time: time.Unix(startTime, 0).UTC(), + ProposerAddress: suite.consAddress.Bytes(), + + Version: tmversion.Consensus{ + Block: version.BlockProtocol, + }, + LastBlockId: tmproto.BlockID{ + Hash: tmhash.Sum([]byte("block_id")), + PartSetHeader: tmproto.PartSetHeader{ + Total: 11, + Hash: tmhash.Sum([]byte("partset_header")), + }, + }, + AppHash: tmhash.Sum([]byte("app")), + DataHash: tmhash.Sum([]byte("data")), + EvidenceHash: tmhash.Sum([]byte("evidence")), + ValidatorsHash: tmhash.Sum([]byte("validators")), + NextValidatorsHash: tmhash.Sum([]byte("next_validators")), + ConsensusHash: tmhash.Sum([]byte("consensus")), + LastResultsHash: tmhash.Sum([]byte("last_result")), + }) + suite.registerQueryClients() + + mintParams := suite.app.MintKeeper.GetParams(suite.ctx) + mintParams.MintDenom = suite.denom + suite.app.MintKeeper.SetParams(suite.ctx, mintParams) + + stakingParams := suite.app.StakingKeeper.GetParams(suite.ctx) + stakingParams.BondDenom = suite.denom + suite.app.StakingKeeper.SetParams(suite.ctx, stakingParams) + + depositParams := suite.app.GovKeeper.GetDepositParams(suite.ctx) + depositParams.MinDeposit = sdk.NewCoins(sdk.NewInt64Coin(KYVE_DENOM, int64(100_000_000_000))) // set min deposit to 100 KYVE + suite.app.GovKeeper.SetDepositParams(suite.ctx, depositParams) + + // Set Validator + valAddr := sdk.ValAddress(suite.address.Bytes()) + validator, _ := stakingtypes.NewValidator(valAddr, ePriv.PubKey(), stakingtypes.Description{}) + validator = stakingkeeper.TestingUpdateValidator(suite.app.StakingKeeper, suite.ctx, validator, true) + _ = suite.app.StakingKeeper.AfterValidatorCreated(suite.ctx, validator.GetOperator()) + _ = suite.app.StakingKeeper.SetValidatorByConsAddr(suite.ctx, validator) + validators := suite.app.StakingKeeper.GetValidators(suite.ctx, 1) + suite.validator = validators[0] +} + +func (suite *KeeperTestSuite) Commit() { + suite.CommitAfter(time.Second * 0) +} + +func (suite *KeeperTestSuite) CommitAfterSeconds(seconds uint64) { + suite.CommitAfter(time.Second * time.Duration(seconds)) +} + +func (suite *KeeperTestSuite) CommitAfter(t time.Duration) { + header := suite.ctx.BlockHeader() + suite.app.EndBlock(abci.RequestEndBlock{Height: header.Height}) + _ = suite.app.Commit() + + header.Height += 1 + header.Time = header.Time.Add(t) + 
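// Editor's note (hedged sketch, not in the original diff): CommitAfter ends the
// current block, commits state, advances the header time by t and then begins the
// next block below, so integration tests can fast-forward chain time, for example:
//
//	s := NewCleanChain()
//	s.CommitAfterSeconds(60) // roughly one minute of chain time passes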
suite.app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + suite.ctx = suite.app.BaseApp.NewContext(false, header) + + suite.registerQueryClients() +} + +func (suite *KeeperTestSuite) registerQueryClients() { + queryHelper := baseapp.NewQueryServerTestHelper(suite.ctx, suite.app.InterfaceRegistry()) + + stakerstypes.RegisterQueryServer(queryHelper, suite.app.StakersKeeper) + suite.queries.stakersClient = stakerstypes.NewQueryClient(queryHelper) +} diff --git a/testutil/integration/transactions.go b/testutil/integration/transactions.go new file mode 100644 index 00000000..12a298e5 --- /dev/null +++ b/testutil/integration/transactions.go @@ -0,0 +1,87 @@ +package integration + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/gomega" +) + +func (suite *KeeperTestSuite) RunTx(msg sdk.Msg) (*sdk.Result, error) { + ctx, commit := suite.ctx.CacheContext() + handler := suite.App().MsgServiceRouter().Handler(msg) + + res, err := handler(ctx, msg) + if err != nil { + return nil, err + } + + commit() + return res, nil +} + +func (suite *KeeperTestSuite) RunTxSuccess(msg sdk.Msg) *sdk.Result { + result, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) + + return result +} + +func (suite *KeeperTestSuite) RunTxGovSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxGovError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxPoolSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxPoolError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxStakersSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxStakersError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxDelegatorSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxDelegatorError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxBundlesSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxBundlesError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxTeamSuccess(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).NotTo(HaveOccurred()) +} + +func (suite *KeeperTestSuite) RunTxTeamError(msg sdk.Msg) { + _, err := suite.RunTx(msg) + Expect(err).To(HaveOccurred()) +} diff --git a/testutil/keeper/bundles.go b/testutil/keeper/bundles.go new file mode 100644 index 00000000..adf1c556 --- /dev/null +++ b/testutil/keeper/bundles.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "testing" + + "github.com/KYVENetwork/chain/x/bundles/keeper" + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + typesparams "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" + tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" + tmdb "github.com/tendermint/tm-db" +) + +func BundlesKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) + + paramsSubspace := typesparams.NewSubspace(cdc, + types.Amino, + storeKey, + memStoreKey, + "BundlesParams", + ) + //k := keeper.NewKeeper( + // cdc, + // storeKey, + // memStoreKey, + // paramsSubspace, + //) + _ = paramsSubspace + var k *keeper.Keeper + + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + + // Initialize params + k.SetParams(ctx, types.DefaultParams()) + + return k, ctx +} diff --git a/testutil/keeper/pool.go b/testutil/keeper/pool.go new file mode 100644 index 00000000..75efcdd8 --- /dev/null +++ b/testutil/keeper/pool.go @@ -0,0 +1,57 @@ +package keeper + +import ( + "testing" + + "github.com/KYVENetwork/chain/x/pool/keeper" + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + typesparams "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmdb "github.com/tendermint/tm-db" +) + +func PoolKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) + + paramsSubspace := typesparams.NewSubspace(cdc, + types.Amino, + storeKey, + memStoreKey, + "PoolParams", + ) + _ = paramsSubspace + //k := keeper.NewKeeper( + // cdc, + // storeKey, + // memStoreKey, + // paramsSubspace, + // authkeeper.AccountKeeper{}, + // bankkeeper.BaseKeeper{}, + // distrkeeper.Keeper{}, + // , + //) + + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + + // Initialize params + // k.SetParams(ctx, types.DefaultParams()) + + return nil, ctx +} diff --git a/testutil/keeper/query.go b/testutil/keeper/query.go new file mode 100644 index 00000000..8322af24 --- /dev/null +++ b/testutil/keeper/query.go @@ -0,0 +1,59 @@ +package keeper + +import ( + "testing" + + "github.com/KYVENetwork/chain/x/query/keeper" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + typesparams "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/stretchr/testify/require" + 
"github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmdb "github.com/tendermint/tm-db" +) + +func QueryKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) + + paramsSubspace := typesparams.NewSubspace(cdc, + types.Amino, + storeKey, + memStoreKey, + "QueryParams", + ) + // k := keeper.NewKeeper( + // cdc, + // storeKey, + // memStoreKey, + // paramsSubspace, + + // authkeeper.AccountKeeper{}, + // bankkeeper.BaseKeeper{}, + // distrkeeper.Keeper{}, + + // ) + _ = paramsSubspace + var k *keeper.Keeper + + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + + // Initialize params + // k.SetParams(ctx, types.DefaultParams()) + + return k, ctx +} diff --git a/testutil/keeper/stakers.go b/testutil/keeper/stakers.go new file mode 100644 index 00000000..b2f8e263 --- /dev/null +++ b/testutil/keeper/stakers.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "testing" + + "github.com/KYVENetwork/chain/x/stakers/keeper" + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + typesparams "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + tmdb "github.com/tendermint/tm-db" +) + +func StakersKeeper(t testing.TB) (*keeper.Keeper, sdk.Context) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) + + paramsSubspace := typesparams.NewSubspace(cdc, + types.Amino, + storeKey, + memStoreKey, + "StakersParams", + ) + //k := keeper.NewKeeper( + // cdc, + // storeKey, + // memStoreKey, + // paramsSubspace, + //) + _ = paramsSubspace + var k *keeper.Keeper + + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) + + // Initialize params + k.SetParams(ctx, types.DefaultParams()) + + return k, ctx +} diff --git a/testutil/nullify/nullify.go b/testutil/nullify/nullify.go new file mode 100644 index 00000000..3b968c09 --- /dev/null +++ b/testutil/nullify/nullify.go @@ -0,0 +1,57 @@ +// Package nullify provides methods to init nil values structs for test assertion. 
+package nullify + +import ( + "reflect" + "unsafe" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + coinType = reflect.TypeOf(sdk.Coin{}) + coinsType = reflect.TypeOf(sdk.Coins{}) +) + +// Fill analyze all struct fields and slices with +// reflection and initialize the nil and empty slices, +// structs, and pointers. +func Fill(x interface{}) interface{} { + v := reflect.Indirect(reflect.ValueOf(x)) + switch v.Kind() { + case reflect.Slice: + for i := 0; i < v.Len(); i++ { + obj := v.Index(i) + objPt := reflect.NewAt(obj.Type(), unsafe.Pointer(obj.UnsafeAddr())).Interface() + objPt = Fill(objPt) + obj.Set(reflect.ValueOf(objPt)) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + f := reflect.Indirect(v.Field(i)) + if !f.CanSet() { + continue + } + switch f.Kind() { + case reflect.Slice: + f.Set(reflect.MakeSlice(f.Type(), 0, 0)) + case reflect.Struct: + switch f.Type() { + case coinType: + coin := reflect.New(coinType).Interface() + s := reflect.ValueOf(coin).Elem() + f.Set(s) + case coinsType: + coins := reflect.New(coinsType).Interface() + s := reflect.ValueOf(coins).Elem() + f.Set(s) + default: + objPt := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Interface() + s := Fill(objPt) + f.Set(reflect.ValueOf(s)) + } + } + } + } + return reflect.Indirect(v).Interface() +} diff --git a/testutil/sample/sample.go b/testutil/sample/sample.go new file mode 100644 index 00000000..98f2153e --- /dev/null +++ b/testutil/sample/sample.go @@ -0,0 +1,13 @@ +package sample + +import ( + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// AccAddress returns a sample account address +func AccAddress() string { + pk := ed25519.GenPrivKey().PubKey() + addr := pk.Address() + return sdk.AccAddress(addr).String() +} diff --git a/tools/tools.go b/tools/tools.go new file mode 100644 index 00000000..4e24981a --- /dev/null +++ b/tools/tools.go @@ -0,0 +1,12 @@ +//go:build tools +// +build tools + +// This file uses the recommended method for tracking developer tools in a Go module. +// +// REF: https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module +package tools + +import ( + _ "github.com/golangci/golangci-lint/cmd/golangci-lint" + _ "mvdan.cc/gofumpt" +) diff --git a/util/arrays.go b/util/arrays.go new file mode 100644 index 00000000..8cf4815b --- /dev/null +++ b/util/arrays.go @@ -0,0 +1,40 @@ +package util + +// RemoveFromUint64ArrayStable removes the first occurrence of `match` in the given `array`. +// It returns True if one element was removed. False otherwise. +// The order of the remaining elements is not changed. 
+func RemoveFromUint64ArrayStable(array []uint64, match uint64) ([]uint64, bool) { + for i, other := range array { + if other == match { + return append(array[0:i], array[i+1:]...), true + } + } + return array, false +} + +func RemoveFromStringArrayStable(array []string, match string) ([]string, bool) { + for i, other := range array { + if other == match { + return append(array[0:i], array[i+1:]...), true + } + } + return array, false +} + +func ContainsUint64(array []uint64, match uint64) bool { + for _, other := range array { + if other == match { + return true + } + } + return false +} + +func ContainsString(array []string, match string) bool { + for _, other := range array { + if other == match { + return true + } + } + return false +} diff --git a/util/logic.go b/util/logic.go new file mode 100644 index 00000000..117b9824 --- /dev/null +++ b/util/logic.go @@ -0,0 +1,37 @@ +package util + +import ( + "encoding/binary" + "fmt" +) + +type KeyPrefixBuilder struct { + Key []byte +} + +func (k *KeyPrefixBuilder) AInt(n uint64) { + indexBytes := make([]byte, 8) + binary.BigEndian.PutUint64(indexBytes, n) + k.Key = append(k.Key, indexBytes...) +} + +func (k *KeyPrefixBuilder) AString(s string) { + k.Key = append(k.Key, []byte(s)...) +} + +func GetByteKey(keys ...interface{}) []byte { + builder := KeyPrefixBuilder{Key: []byte{}} + for _, key := range keys { + switch v := key.(type) { + default: + panic(fmt.Sprintf("Unsupported Key Type: %T with value: %#v", v, key)) + case uint64: + builder.AInt(v) + case string: + builder.AString(v) + case []byte: + builder.Key = append(builder.Key, v...) + } + } + return builder.Key +} diff --git a/util/logic_bank.go b/util/logic_bank.go new file mode 100644 index 00000000..bc2337c6 --- /dev/null +++ b/util/logic_bank.go @@ -0,0 +1,96 @@ +package util + +import ( + globalTypes "github.com/KYVENetwork/chain/x/global/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type BankKeeper interface { + SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToModule(ctx sdk.Context, senderModule, recipientModule string, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error +} + +type DistrKeeper interface { + FundCommunityPool(ctx sdk.Context, amount sdk.Coins, sender sdk.AccAddress) error +} + +type AccountKeeper interface { + GetModuleAddress(moduleName string) sdk.AccAddress +} + +// TransferFromAddressToAddress sends tokens from the given address to a specified address. +func TransferFromAddressToAddress(bankKeeper BankKeeper, ctx sdk.Context, fromAddress string, toAddress string, amount uint64) error { + sender, errSenderAddress := sdk.AccAddressFromBech32(fromAddress) + if errSenderAddress != nil { + return errSenderAddress + } + + recipient, errRecipientAddress := sdk.AccAddressFromBech32(toAddress) + if errRecipientAddress != nil { + return errRecipientAddress + } + + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + err := bankKeeper.SendCoins(ctx, sender, recipient, coins) + return err +} + +// TransferToAddress sends tokens from the given module to a specified address. 
+func TransferFromModuleToAddress(bankKeeper BankKeeper, ctx sdk.Context, module string, address string, amount uint64) error { + recipient, errAddress := sdk.AccAddressFromBech32(address) + if errAddress != nil { + return errAddress + } + + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + err := bankKeeper.SendCoinsFromModuleToAccount(ctx, module, recipient, coins) + return err +} + +// TransferToRegistry sends tokens from a specified address to the given module. +func TransferFromAddressToModule(bankKeeper BankKeeper, ctx sdk.Context, address string, module string, amount uint64) error { + sender, errAddress := sdk.AccAddressFromBech32(address) + if errAddress != nil { + return errAddress + } + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + + err := bankKeeper.SendCoinsFromAccountToModule(ctx, sender, module, coins) + return err +} + +// TransferInterModule ... +func TransferFromModuleToModule(bankKeeper BankKeeper, ctx sdk.Context, fromModule string, toModule string, amount uint64) error { + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + err := bankKeeper.SendCoinsFromModuleToModule(ctx, fromModule, toModule, coins) + return err +} + +// transferToTreasury sends tokens from this module to the treasury (community spend pool). +func TransferFromAddressToTreasury(distrKeeper DistrKeeper, ctx sdk.Context, address string, amount uint64) error { + sender, errAddress := sdk.AccAddressFromBech32(address) + if errAddress != nil { + return errAddress + } + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + + if err := distrKeeper.FundCommunityPool(ctx, coins, sender); err != nil { + return err + } + + return nil +} + +// transferToTreasury sends tokens from this module to the treasury (community spend pool). +func TransferFromModuleToTreasury(accountKeeper AccountKeeper, distrKeeper DistrKeeper, ctx sdk.Context, module string, amount uint64) error { + sender := accountKeeper.GetModuleAddress(module) + coins := sdk.NewCoins(sdk.NewInt64Coin(globalTypes.Denom, int64(amount))) + + if err := distrKeeper.FundCommunityPool(ctx, coins, sender); err != nil { + return err + } + + return nil +} diff --git a/util/logic_error.go b/util/logic_error.go new file mode 100644 index 00000000..e689e5d4 --- /dev/null +++ b/util/logic_error.go @@ -0,0 +1,39 @@ +package util + +import ( + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + upgradeTypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +type UpgradeKeeper interface { + ScheduleUpgrade(ctx sdk.Context, plan upgradeTypes.Plan) error +} + +// PanicHalt performs an emergency upgrade which immediately halts the chain +// The Team has to come up with a solution and develop a patch to handle +// the update. +// In a fully bug-free code this function will never be called. 
+// This function is there to do assertions and in case of a violation +// it will shut down the chain gracefully, to make it easier to recover from +// a fatal error +func PanicHalt(upgradeKeeper UpgradeKeeper, ctx sdk.Context, message string) { + // Choose next block for the upgrade + upgradeBlockHeight := ctx.BlockHeader().Height + 1 + + // Create emergency plan + plan := upgradeTypes.Plan{ + Name: "emergency_" + strconv.FormatInt(upgradeBlockHeight, 10), + Height: upgradeBlockHeight, + Info: "Emergency Halt; panic occurred; Error:" + message, + } + + // Directly submit emergency plan + // Errors can't occur with the current sdk-version + err := upgradeKeeper.ScheduleUpgrade(ctx, plan) + if err != nil { + // Can't happen with current sdk + panic("Emergency Halt failed: " + message) + } +} diff --git a/util/logic_math.go b/util/logic_math.go new file mode 100644 index 00000000..0c3ab982 --- /dev/null +++ b/util/logic_math.go @@ -0,0 +1,22 @@ +package util + +func MinInt(a int, b int) int { + if a < b { + return a + } + return b +} + +func MinUInt64(a uint64, b uint64) uint64 { + if a < b { + return a + } + return b +} + +func MaxUInt64(a uint64, b uint64) uint64 { + if a > b { + return a + } + return b +} diff --git a/util/validate.go b/util/validate.go new file mode 100644 index 00000000..b218e277 --- /dev/null +++ b/util/validate.go @@ -0,0 +1,36 @@ +package util + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func ValidateUint64(v interface{}) error { + _, ok := v.(uint64) + if !ok { + return fmt.Errorf("invalid parameter type: %T", v) + } + return nil +} + +func ValidatePercentage(v interface{}) error { + val, ok := v.(string) + if !ok { + return fmt.Errorf("invalid parameter type: %T", v) + } + + parsedVal, err := sdk.NewDecFromStr(val) + if err != nil { + return fmt.Errorf("invalid decimal representation: %T", v) + } + + if parsedVal.LT(sdk.NewDec(0)) { + return fmt.Errorf("percentage should be greater than or equal to 0") + } + if parsedVal.GT(sdk.NewDec(1)) { + return fmt.Errorf("percentage should be less than or equal to 1") + } + + return nil +} diff --git a/x/bundles/client/cli/query.go b/x/bundles/client/cli/query.go new file mode 100644 index 00000000..0504e0ee --- /dev/null +++ b/x/bundles/client/cli/query.go @@ -0,0 +1,31 @@ +package cli + +import ( + "fmt" + // "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + // "github.com/cosmos/cosmos-sdk/client/flags" + // sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/KYVENetwork/chain/x/bundles/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(queryRoute string) *cobra.Command { + // Group bundles queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdQueryParams()) + // this line is used by starport scaffolding # 1 + + return cmd +} diff --git a/x/bundles/client/cli/query_params.go b/x/bundles/client/cli/query_params.go new file mode 100644 index 00000000..7676e95a --- /dev/null +++ b/x/bundles/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: 
"params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/bundles/client/cli/tx.go b/x/bundles/client/cli/tx.go new file mode 100644 index 00000000..fad32184 --- /dev/null +++ b/x/bundles/client/cli/tx.go @@ -0,0 +1,27 @@ +package cli + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/spf13/cobra" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdClaimUploaderRole()) + cmd.AddCommand(CmdSkipUploaderRole()) + cmd.AddCommand(CmdSubmitBundleProposal()) + cmd.AddCommand(CmdVoteBundleProposal()) + + return cmd +} diff --git a/x/bundles/client/cli/tx_claim_uploader_role.go b/x/bundles/client/cli/tx_claim_uploader_role.go new file mode 100644 index 00000000..dddcfe83 --- /dev/null +++ b/x/bundles/client/cli/tx_claim_uploader_role.go @@ -0,0 +1,45 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdClaimUploaderRole() *cobra.Command { + cmd := &cobra.Command{ + Use: "claim-uploader-role [staker] [pool_id]", + Short: "Broadcast message claim-uploader-role", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argStaker := args[0] + + argPoolId, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgClaimUploaderRole( + clientCtx.GetFromAddress().String(), + argStaker, + argPoolId, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/bundles/client/cli/tx_skip_uploader_role.go b/x/bundles/client/cli/tx_skip_uploader_role.go new file mode 100644 index 00000000..ef55be89 --- /dev/null +++ b/x/bundles/client/cli/tx_skip_uploader_role.go @@ -0,0 +1,51 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdSkipUploaderRole() *cobra.Command { + cmd := &cobra.Command{ + Use: "skip-uploader-role [staker] [pool_id] [from_index]", + Short: "Broadcast message skip-uploader-role", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argStaker := args[0] + + argPoolId, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + argFromIndex, err := cast.ToUint64E(args[2]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err 
!= nil { + return err + } + + msg := types.NewMsgSkipUploaderRole( + clientCtx.GetFromAddress().String(), + argStaker, + argPoolId, + argFromIndex, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/bundles/client/cli/tx_submit_bundle_proposal.go b/x/bundles/client/cli/tx_submit_bundle_proposal.go new file mode 100644 index 00000000..7cb510f9 --- /dev/null +++ b/x/bundles/client/cli/tx_submit_bundle_proposal.go @@ -0,0 +1,78 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdSubmitBundleProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "submit-bundle-proposal [staker] [pool_id] [storage_id] [data_size] [data_hash] [from_index] [bundle_size] [from_key] [to_key] [bundle_summary]", + Short: "Broadcast message submit-bundle-proposal", + Args: cobra.ExactArgs(10), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argStaker := args[0] + + argPoolId, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + argStorageId := args[2] + + argDataSize, err := cast.ToUint64E(args[3]) + if err != nil { + return err + } + + argDataHash := args[4] + + argFromIndex, err := cast.ToUint64E(args[5]) + if err != nil { + return err + } + + argBundleSize, err := cast.ToUint64E(args[6]) + if err != nil { + return err + } + + argFromKey := args[7] + + argToKey := args[8] + + argBundleSummary := args[9] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgSubmitBundleProposal( + clientCtx.GetFromAddress().String(), + argStaker, + argPoolId, + argStorageId, + argDataSize, + argDataHash, + argFromIndex, + argBundleSize, + argFromKey, + argToKey, + argBundleSummary, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/bundles/client/cli/tx_vote_proposal.go b/x/bundles/client/cli/tx_vote_proposal.go new file mode 100644 index 00000000..e29c5991 --- /dev/null +++ b/x/bundles/client/cli/tx_vote_proposal.go @@ -0,0 +1,54 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdVoteBundleProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "vote-bundle-proposal [staker] [pool_id] [storage_id] [vote]", + Short: "Broadcast message vote-bundle-proposal", + Args: cobra.ExactArgs(4), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argStaker := args[0] + + argPoolId, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + argStorageId := args[2] + + argVote, err := cast.ToInt32E(args[3]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgVoteBundleProposal( + clientCtx.GetFromAddress().String(), + argStaker, + argPoolId, + argStorageId, + types.VoteType(argVote), + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, 
cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/bundles/genesis.go b/x/bundles/genesis.go new file mode 100644 index 00000000..fbe90d98 --- /dev/null +++ b/x/bundles/genesis.go @@ -0,0 +1,34 @@ +package bundles + +import ( + "github.com/KYVENetwork/chain/x/bundles/keeper" + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the capability module's state from a provided genesis +// state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + // this line is used by starport scaffolding # genesis/module/init + k.SetParams(ctx, genState.Params) + + for _, entry := range genState.BundleProposalList { + k.SetBundleProposal(ctx, entry) + } + + for _, entry := range genState.FinalizedBundleList { + k.SetFinalizedBundle(ctx, entry) + } +} + +// ExportGenesis returns the capability module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + genesis.Params = k.GetParams(ctx) + + genesis.BundleProposalList = k.GetAllBundleProposals(ctx) + + genesis.FinalizedBundleList = k.GetAllFinalizedBundles(ctx) + + return genesis +} diff --git a/x/bundles/keeper/getters_bundles.go b/x/bundles/keeper/getters_bundles.go new file mode 100644 index 00000000..060bdb4f --- /dev/null +++ b/x/bundles/keeper/getters_bundles.go @@ -0,0 +1,158 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// SetBundleProposal stores a current bundle proposal in the KV-Store. +// There is only one bundle proposal per pool +func (k Keeper) SetBundleProposal(ctx sdk.Context, bundleProposal types.BundleProposal) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BundleKeyPrefix) + b := k.cdc.MustMarshal(&bundleProposal) + store.Set(types.BundleProposalKey( + bundleProposal.PoolId, + ), b) +} + +// GetBundleProposal returns the bundle proposal for the given pool with id `poolId` +func (k Keeper) GetBundleProposal(ctx sdk.Context, poolId uint64) (val types.BundleProposal, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BundleKeyPrefix) + + b := store.Get(types.BundleProposalKey(poolId)) + if b == nil { + val.PoolId = poolId + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetAllBundleProposals returns all bundle proposals of all pools +func (k Keeper) GetAllBundleProposals(ctx sdk.Context) (list []types.BundleProposal) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.BundleKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + for ; iterator.Valid(); iterator.Next() { + var val types.BundleProposal + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// ===================== +// = Finalized Bundles = +// ===================== + +// SetFinalizedBundle stores a finalized bundle identified by its `poolId` and `id`. 
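+// The store key combines the pool id and the bundle id (see types.FinalizedBundleKey below), so a single finalized bundle can be fetched directly by (poolId, id) without iterating over the whole pool.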
+func (k Keeper) SetFinalizedBundle(ctx sdk.Context, finalizedBundle types.FinalizedBundle) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.FinalizedBundlePrefix) + b := k.cdc.MustMarshal(&finalizedBundle) + store.Set(types.FinalizedBundleKey( + finalizedBundle.PoolId, + finalizedBundle.Id, + ), b) + + k.SetFinalizedBundleIndexes(ctx, finalizedBundle) +} + +// SetFinalizedBundleIndexes sets an in-memory reference for every bundle sorted by pool/fromIndex +// to allow querying for specific bundle ranges. +func (k Keeper) SetFinalizedBundleIndexes(ctx sdk.Context, finalizedBundle types.FinalizedBundle) { + indexByStorageHeight := prefix.NewStore(ctx.KVStore(k.memKey), types.FinalizedBundleByHeightPrefix) + indexByStorageHeight.Set( + types.FinalizedBundleByHeightKey(finalizedBundle.PoolId, finalizedBundle.FromIndex), + util.GetByteKey(finalizedBundle.Id)) +} + +func (k Keeper) GetAllFinalizedBundles(ctx sdk.Context) (list []types.FinalizedBundle) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.FinalizedBundlePrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + for ; iterator.Valid(); iterator.Next() { + var val types.FinalizedBundle + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +func (k Keeper) GetFinalizedBundlesByPool(ctx sdk.Context, poolId uint64) (list []types.FinalizedBundle) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.FinalizedBundlePrefix) + iterator := sdk.KVStorePrefixIterator(store, util.GetByteKey(poolId)) + + for ; iterator.Valid(); iterator.Next() { + var val types.FinalizedBundle + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetFinalizedBundle returns a finalized bundle by its identifier +func (k Keeper) GetFinalizedBundle(ctx sdk.Context, poolId, id uint64) (val types.FinalizedBundle, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.FinalizedBundlePrefix) + + b := store.Get(types.FinalizedBundleKey(poolId, id)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// TODO(postAudit,@max) consider performance improvement +func (k Keeper) GetPaginatedFinalizedBundleQuery(ctx sdk.Context, pagination *query.PageRequest, poolId uint64) ([]types.FinalizedBundle, *query.PageResponse, error) { + var data []types.FinalizedBundle + + store := prefix.NewStore(ctx.KVStore(k.storeKey), util.GetByteKey(types.FinalizedBundlePrefix, poolId)) + + pageRes, err := query.FilteredPaginate(store, pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + if accumulate { + var finalizedBundle types.FinalizedBundle + if err := k.cdc.Unmarshal(value, &finalizedBundle); err != nil { + return false, err + } + + data = append(data, finalizedBundle) + } + + return true, nil + }) + if err != nil { + return nil, nil, status.Error(codes.Internal, err.Error()) + } + + return data, pageRes, nil +} + +func (k Keeper) GetFinalizedBundleByHeight(ctx sdk.Context, poolId, height uint64) (val types.FinalizedBundle, found bool) { + proposalIndexStore := prefix.NewStore(ctx.KVStore(k.memKey), util.GetByteKey(types.FinalizedBundleByHeightPrefix, poolId)) + proposalIndexIterator := proposalIndexStore.ReverseIterator(nil, util.GetByteKey(height+1)) + defer proposalIndexIterator.Close() + + if proposalIndexIterator.Valid() { + bundleId := binary.BigEndian.Uint64(proposalIndexIterator.Value()) + + bundle, bundleFound := k.GetFinalizedBundle(ctx, poolId, bundleId) + if bundleFound { + if 
bundle.FromIndex <= height && bundle.ToIndex > height { + return bundle, true + } + } + } + return +} diff --git a/x/bundles/keeper/getters_params.go b/x/bundles/keeper/getters_params.go new file mode 100644 index 00000000..5979332a --- /dev/null +++ b/x/bundles/keeper/getters_params.go @@ -0,0 +1,46 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams returns the current x/bundles module parameters. +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + store := ctx.KVStore(k.storeKey) + + bz := store.Get(types.ParamsKey) + if bz == nil { + return params + } + + k.cdc.MustUnmarshal(bz, ¶ms) + return params +} + +// GetUploadTimeout returns the UploadTimeout param +func (k Keeper) GetUploadTimeout(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).UploadTimeout +} + +// GetStorageCost returns the StorageCost param +func (k Keeper) GetStorageCost(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).StorageCost +} + +// GetNetworkFee returns the NetworkFee param +func (k Keeper) GetNetworkFee(ctx sdk.Context) (res string) { + return k.GetParams(ctx).NetworkFee +} + +// GetMaxPoints returns the MaxPoints param +func (k Keeper) GetMaxPoints(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).MaxPoints +} + +// SetParams sets the x/bundles module parameters. +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(¶ms) + store.Set(types.ParamsKey, bz) +} diff --git a/x/bundles/keeper/grpc_query.go b/x/bundles/keeper/grpc_query.go new file mode 100644 index 00000000..617e81b2 --- /dev/null +++ b/x/bundles/keeper/grpc_query.go @@ -0,0 +1,7 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" +) + +var _ types.QueryServer = Keeper{} diff --git a/x/bundles/keeper/grpc_query_params.go b/x/bundles/keeper/grpc_query_params.go new file mode 100644 index 00000000..88a2d37e --- /dev/null +++ b/x/bundles/keeper/grpc_query_params.go @@ -0,0 +1,19 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/bundles/keeper/keeper.go b/x/bundles/keeper/keeper.go new file mode 100644 index 00000000..59c40483 --- /dev/null +++ b/x/bundles/keeper/keeper.go @@ -0,0 +1,84 @@ +package keeper + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + + authority string + + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + distrkeeper types.DistrKeeper + poolKeeper types.PoolKeeper + stakerKeeper types.StakerKeeper + delegationKeeper types.DelegationKeeper + upgradeKeeper types.UpgradeKeeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storetypes.StoreKey, + memKey storetypes.StoreKey, + + 
authority string, + + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, + distrkeeper types.DistrKeeper, + poolKeeper types.PoolKeeper, + stakerKeeper types.StakerKeeper, + delegationKeeper types.DelegationKeeper, + upgradeKeeper types.UpgradeKeeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + + authority: authority, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + distrkeeper: distrkeeper, + poolKeeper: poolKeeper, + stakerKeeper: stakerKeeper, + delegationKeeper: delegationKeeper, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +// A mem-store initialization needs to be performed in the begin-block. +// After a node restarts it will use the first begin-block which happens +// to rebuild the mem-store. After that `memStoreInitialized` indicates +// that the mem store was already built. +var memStoreInitialized = false + +func (k Keeper) InitMemStore(gasCtx sdk.Context) { + if !memStoreInitialized { + + // Update mem index + noGasCtx := gasCtx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) + for _, entry := range k.GetAllFinalizedBundles(noGasCtx) { + k.SetFinalizedBundleIndexes(noGasCtx, entry) + } + + memStoreInitialized = true + } +} diff --git a/x/bundles/keeper/keeper_suite_dropped_bundles_test.go b/x/bundles/keeper/keeper_suite_dropped_bundles_test.go new file mode 100644 index 00000000..63c34752 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_dropped_bundles_test.go @@ -0,0 +1,184 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - dropped bundles + +* Produce a dropped bundle because not enough validators voted + +*/ + +var _ = Describe("dropped bundles", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = 
s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Produce a dropped bundle because not enough validators voted", func() { + // ARRANGE + // stake a bit more than first node so >50% is reached + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ACT + // do not vote so bundle gets dropped + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(Equal(uint64(1))) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + + // check pool 
funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_funding_bundles_test.go b/x/bundles/keeper/keeper_suite_funding_bundles_test.go new file mode 100644 index 00000000..5100d225 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_funding_bundles_test.go @@ -0,0 +1,674 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - funding bundles + +* Produce a valid bundle with only one funder +* Produce a valid bundle with multiple funders and same funding amounts +* Produce a valid bundle with multiple funders and different funding amounts +* Produce a valid bundle with multiple funders and different funding amounts where not everyone can afford the funds +* Produce a dropped bundle because the only funder can not pay for the bundle reward +* Produce a dropped bundle because multiple funders with same amount can not pay for the bundle reward +* Produce a dropped bundle because multiple funders with different amount can not pay for the bundle reward + +*/ + +var _ = Describe("funding bundles", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceAlice := s.GetBalanceFromAddress(i.ALICE) + initialBalanceBob := s.GetBalanceFromAddress(i.BOB) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Produce a valid bundle with only one funder", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + totalReward := 
100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(Equal(100*i.KYVE - totalReward)) + Expect(pool.Funders).To(HaveLen(1)) + + // assert individual funds + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 100*i.KYVE)) + }) + + It("Produce a valid bundle with multiple funders and same funding amounts", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(Equal(200*i.KYVE - totalReward)) + Expect(pool.Funders).To(HaveLen(2)) + + // assert individual funds + fundersCharge := uint64(sdk.NewDec(int64(totalReward)).Quo(sdk.NewDec(2)).TruncateInt64()) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - fundersCharge)) + + Expect(pool.GetFunderAmount(i.BOB)).To(Equal(100*i.KYVE - fundersCharge)) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 100*i.KYVE)) + + balanceBob := s.GetBalanceFromAddress(i.BOB) + Expect(balanceBob).To(Equal(initialBalanceBob - 100*i.KYVE)) + }) + + It("Produce a valid bundle with multiple funders and different funding amounts", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 150 * i.KYVE, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 50 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + 
StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(Equal(200*i.KYVE - totalReward)) + Expect(pool.Funders).To(HaveLen(2)) + + // assert individual funds + fundersCharge := uint64(sdk.NewDec(int64(totalReward)).Quo(sdk.NewDec(2)).TruncateInt64()) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(150*i.KYVE - fundersCharge)) + + Expect(pool.GetFunderAmount(i.BOB)).To(Equal(50*i.KYVE - fundersCharge)) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 150*i.KYVE)) + + balanceBob := s.GetBalanceFromAddress(i.BOB) + Expect(balanceBob).To(Equal(initialBalanceBob - 50*i.KYVE)) + }) + + It("Produce a valid bundle with multiple funders and different funding amounts where not everyone can afford the funds", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 10, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(Equal(100*i.KYVE - totalReward)) + Expect(pool.Funders).To(HaveLen(1)) + + // assert individual funds + fundersCharge := uint64(sdk.NewDec(int64(totalReward)).TruncateInt64()) + + Expect(pool.GetFunderAmount(i.BOB)).To(Equal(100*i.KYVE - fundersCharge)) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 10)) + + balanceBob := s.GetBalanceFromAddress(i.BOB) + Expect(balanceBob).To(Equal(initialBalanceBob - 100*i.KYVE)) + }) + + It("Produce a dropped bundle because the only funder can not pay for the bundle reward", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 10, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: 
i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(BeZero()) + Expect(pool.Funders).To(BeEmpty()) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 10)) + }) + + It("Produce a dropped bundle because multiple funders with same amount can not pay for the bundle reward", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 10, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 10, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + 
PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(BeZero()) + Expect(pool.Funders).To(BeEmpty()) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 10)) + + balanceBob := s.GetBalanceFromAddress(i.BOB) + Expect(balanceBob).To(Equal(initialBalanceBob - 10)) + }) + + It("Produce a dropped bundle because multiple funders with different amount can not pay for the bundle reward", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 10, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 20, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on 
pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + // assert total pool funds + Expect(pool.TotalFunds).To(BeZero()) + Expect(pool.Funders).To(BeEmpty()) + + // assert individual balances + balanceAlice := s.GetBalanceFromAddress(i.ALICE) + Expect(balanceAlice).To(Equal(initialBalanceAlice - 10)) + + balanceBob := s.GetBalanceFromAddress(i.BOB) + Expect(balanceBob).To(Equal(initialBalanceBob - 20)) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_invalid_bundles_test.go b/x/bundles/keeper/keeper_suite_invalid_bundles_test.go new file mode 100644 index 00000000..2bdb65b0 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_invalid_bundles_test.go @@ -0,0 +1,560 @@ +package keeper_test + +import ( + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +/* + +TEST CASES - invalid bundles + +* Produce an invalid bundle with multiple validators and no foreign delegations +* Produce an invalid bundle with multiple validators and foreign delegations +* Produce an invalid bundle with multiple validators although some voted valid + +*/ + +var _ = Describe("invalid bundles", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + + initialBalanceStaker2 := s.GetBalanceFromAddress(i.STAKER_2) + initialBalanceValaddress2 := s.GetBalanceFromAddress(i.VALADDRESS_2) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + initialBalanceStaker2 = s.GetBalanceFromAddress(i.STAKER_2) + initialBalanceValaddress2 = s.GetBalanceFromAddress(i.VALADDRESS_2) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Produce an invalid bundle with multiple validators and no foreign delegations", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // stake a bit more than first node so >50% is reached + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + s.CommitAfterSeconds(60) + + 
s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + _, valaccountUploaderFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploaderFound).To(BeFalse()) + + balanceValaddress := s.GetBalanceFromAddress(i.VALADDRESS_0) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + + _, uploaderFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(uploaderFound).To(BeTrue()) + + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // calculate uploader slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetUploadSlash(s.Ctx())) + slashAmount := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100*i.KYVE - slashAmount)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(200 * i.KYVE)) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) + + It("Produce an invalid bundle with multiple 
validators and foreign delegations", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // stake a bit more than first node so >50% is reached + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 300 * i.KYVE, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + _, valaccountUploaderFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploaderFound).To(BeFalse()) + + balanceValaddress := s.GetBalanceFromAddress(i.VALADDRESS_0) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + _, uploaderFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(uploaderFound).To(BeTrue()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + // assert uploader 
self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // calculate uploader slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetUploadSlash(s.Ctx())) + slashAmountUploader := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + slashAmountDelegator := uint64(sdk.NewDec(int64(300 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100*i.KYVE - slashAmountUploader)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(300*i.KYVE - slashAmountDelegator)) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(500 * i.KYVE)) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) + + It("Produce an invalid bundle with multiple validators although some voted valid", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // stake a bit more than first node so >50% is reached + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 500 * i.KYVE, + }) + + // stake less so quorum is not that affected + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_2, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_2, + PoolId: 0, + Valaddress: i.VALADDRESS_2, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.CHARLIE, + Staker: i.STAKER_2, + Amount: 100 * i.KYVE, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceStaker2 = s.GetBalanceFromAddress(i.STAKER_2) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_2, + Staker: i.STAKER_2, + PoolId: 0, + StorageId: 
"y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + _, valaccountUploaderFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploaderFound).To(BeFalse()) + + balanceValaddress := s.GetBalanceFromAddress(i.VALADDRESS_0) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + _, uploaderFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(uploaderFound).To(BeTrue()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // calculate uploader slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetUploadSlash(s.Ctx())) + slashAmountUploader := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + slashAmountDelegator1 := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100*i.KYVE - slashAmountUploader)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(100*i.KYVE - slashAmountDelegator1)) + + // calculate voter slashes + fraction, _ = sdk.NewDecFromStr(s.App().DelegationKeeper.GetVoteSlash(s.Ctx())) + slashAmountVoter := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + slashAmountDelegator2 := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_2, 
i.STAKER_2)).To(Equal(100*i.KYVE - slashAmountVoter)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_2, i.CHARLIE)).To(Equal(100*i.KYVE - slashAmountDelegator2)) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(600 * i.KYVE)) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // check voter2 status + _, valaccountVoterFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_2) + Expect(valaccountVoterFound).To(BeFalse()) + + balanceVoterValaddress = s.GetBalanceFromAddress(i.VALADDRESS_2) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress2)) + + balanceVoter = s.GetBalanceFromAddress(i.STAKER_2) + Expect(balanceVoter).To(Equal(initialBalanceStaker2)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_2, i.STAKER_2)).To(BeZero()) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_2, i.CHARLIE)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_points_test.go b/x/bundles/keeper/keeper_suite_points_test.go new file mode 100644 index 00000000..8c1defa5 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_points_test.go @@ -0,0 +1,471 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - points + +* One validator does not vote for one proposal +* One validator votes after having not voted previously +* One validator does not vote for multiple proposals in a row +* One validator votes after having not voted previously multiple times +* One validator does not vote for multiple proposals and reaches max points +* One validator does not vote for multiple proposals and submits a bundle proposal +* One validator does not vote for multiple proposals and skip the uploader role +* One validator submits a bundle proposal where he reaches max points because he did not vote before + +*/ + +var _ = Describe("points", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("One validator does not vote for one proposal", func() { + // ACT + // do not vote + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(Equal(uint64(1))) + }) + + It("One validator votes after having not voted previously", func() { + // ARRANGE + // do not vote + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + 
s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 200, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + }) + + It("One validator does not vote for multiple proposals in a row", func() { + // ACT + for r := 1; r <= 3; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(Equal(uint64(3))) + }) + + It("One validator votes after having not voted previously multiple times", func() { + // ARRANGE + for r := 1; r <= 3; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 400, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + }) + + It("One validator does not vote for multiple proposals and reaches max points", func() { + // ARRANGE + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) + + // ACT + for r := 1; r <= maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: 
i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ASSERT + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, stakerFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_1) + Expect(stakerFound).To(BeTrue()) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + // check if voter got slashed + slashAmountRatio, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetTimeoutSlash(s.Ctx())) + expectedBalance := 50*i.KYVE - uint64(sdk.NewDec(int64(50*i.KYVE)).Mul(slashAmountRatio).RoundInt64()) + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1))) + }) + + It("One validator does not vote for multiple proposals and submits a bundle proposal", func() { + // ARRANGE + for r := 1; r <= 3; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ACT + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 400, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + // points are instantly 1 because node did not vote on this bundle, too + Expect(valaccountVoter.Points).To(Equal(uint64(1))) + }) + + It("One validator does not vote for multiple proposals and skip the uploader role", func() { + // ARRANGE + for r := 1; r <= 3; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = 
i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ACT + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + FromIndex: 400, + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + }) + + It("One validator submits a bundle proposal where he reaches max points because he did not vote before", func() { + // ARRANGE + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) - 1 + + for r := 1; r <= maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ACT + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 500, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + // points are instantly 1 because node did not vote on this bundle, too + Expect(valaccountVoter.Points).To(Equal(uint64(1))) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_stakers_leave_test.go b/x/bundles/keeper/keeper_suite_stakers_leave_test.go new file mode 100644 index 00000000..0e159038 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_stakers_leave_test.go @@ -0,0 +1,592 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - stakers leave + +* Staker leaves, although he is the next uploader and runs into the upload timeout +* Staker leaves, although he was the uploader of the previous round and should receive the uploader reward +* Staker leaves, although he was the uploader of the previous round and should get slashed +* Staker leaves, although he was a voter in the previous round and should get slashed +* Staker leaves, although he was a voter in the previous round and should get a point +* Staker leaves, although he was a voter who did not vote max points in a row should not get slashed + +*/ + +var _ = Describe("stakers leave", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + //initialBalanceValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + // + initialBalanceStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + //initialBalanceValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + // + //initialBalanceStaker2 := s.GetBalanceFromAddress(i.STAKER_2) + //initialBalanceValaddress2 := s.GetBalanceFromAddress(i.VALADDRESS_2) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + //initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + // + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + //initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + // + //initialBalanceStaker2 = s.GetBalanceFromAddress(i.STAKER_2) + //initialBalanceValaddress2 = s.GetBalanceFromAddress(i.VALADDRESS_2) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Staker leaves, although he is the next uploader and runs into the upload timeout", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ACT + s.CommitAfterSeconds(60) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_1)) + + _, valaccountFound := 
s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountFound).To(BeFalse()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(50 * i.KYVE)) + + // check if next uploader got not slashed + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker leaves, although he was the uploader of the previous round and should receive the uploader reward", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(Equal("18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4")) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_1)) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountFound).To(BeFalse()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(50 * i.KYVE)) + + // check if next uploader got not slashed + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + + // check if next uploader received the uploader reward + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := 
uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + uploaderDelegationReward := totalUploaderReward - uploaderPayoutReward + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + }) + + It("Staker leaves, although he was the uploader of the previous round and should get slashed", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_1)) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountFound).To(BeFalse()) + + // check if next uploader got slashed + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetUploadSlash(s.Ctx())) + slashAmount := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100*i.KYVE - slashAmount)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(200 * i.KYVE)) + + // check if next uploader did not receive the uploader reward + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + }) + + It("Staker leaves, although he was a voter in the previous round and should get slashed", func() { + 
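+ // note: the voter votes invalid on a proposal that is later finalized as valid and
+ // then leaves the pool, so the vote slash must still be applied to its self delegation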
// ARRANGE + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_1) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4")) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_0)) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + // check if voter got slashed + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetVoteSlash(s.Ctx())) + slashAmount := uint64(sdk.NewDec(int64(50 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(Equal(50*i.KYVE - slashAmount)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader did not receive any rewards + balanceVoter := s.GetBalanceFromAddress(i.STAKER_1) + + // assert payout transfer + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + }) + + It("Staker leaves, although he was a voter in the previous round and should get a point", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 
50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_1) + + // do not vote + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4")) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_0)) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + // check if voter status + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(Equal(50 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader did not receive any rewards + balanceVoter := s.GetBalanceFromAddress(i.STAKER_1) + + // assert payout transfer + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + }) + + It("Staker leaves, although he was a voter who did not vote max points in a row should not get slashed", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) + + for r := 0; r < maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + + // leave pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_1) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: 
"18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 500, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("18SRvVuCrB8vy_OCLBaNbXONMVGeflGcw4gGTZ1oUt4")) + + // check if next uploader is still removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + Expect(poolStakers[0]).To(Equal(i.STAKER_0)) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + // check if voter not got slashed + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(Equal(50 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader did not receive any rewards + balanceVoter := s.GetBalanceFromAddress(i.STAKER_1) + + // assert payout transfer + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_test.go b/x/bundles/keeper/keeper_suite_test.go new file mode 100644 index 00000000..234862ba --- /dev/null +++ b/x/bundles/keeper/keeper_suite_test.go @@ -0,0 +1,16 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/bundles/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestBundlesKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} diff --git a/x/bundles/keeper/keeper_suite_valid_bundles_test.go b/x/bundles/keeper/keeper_suite_valid_bundles_test.go new file mode 100644 index 00000000..6a2665bd --- /dev/null +++ b/x/bundles/keeper/keeper_suite_valid_bundles_test.go @@ -0,0 +1,1142 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +/* + +TEST CASES - valid bundles + +* Produce a valid bundle with one validator and no foreign delegations +* Produce a valid bundle with one validator and foreign delegations +* Produce a valid bundle with multiple validators and no foreign delegations +* Produce a valid bundle with multiple validators and foreign delegations +* Produce a valid bundle with multiple validators and foreign delegation although some did not vote at all +* Produce a valid bundle with multiple validators and foreign delegation although some voted abstain +* Produce a valid bundle with multiple validators and foreign delegation although some voted invalid + +*/ + +var _ = Describe("valid bundles", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Produce a valid bundle with one validator and no foreign delegations", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := 
s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + uploaderDelegationReward := totalUploaderReward - uploaderPayoutReward + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + 
Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with one validator and foreign delegations", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := 
s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + totalDelegationReward := totalUploaderReward - uploaderPayoutReward + + // divide with 4 because uploader only has 25% of total delegation + uploaderDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).TruncateInt64()) + delegatorDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).Mul(sdk.NewDec(3)).TruncateInt64()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + // assert delegator delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(delegatorDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with multiple validators and no foreign delegations", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + 
+ Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + // TODO(postAudit,@troy): how to get next uploader deterministically? + Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + uploaderDelegationReward := totalUploaderReward - uploaderPayoutReward + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + 
Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with one validator and foreign delegations", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 200 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + // TODO(postAudit,@troy): how to get next uploader deterministically? 
+ Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + totalDelegationReward := totalUploaderReward - uploaderPayoutReward + + // divide by 3 because uploader only has 1/3 of total delegation + uploaderDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(3)).TruncateInt64()) + delegatorDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(3)).Mul(sdk.NewDec(2)).TruncateInt64()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + // assert delegator delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(delegatorDelegationReward)) + + // check voter rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with multiple validators and
foreign delegation although some did not vote at all", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check 
uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(Equal(uint64(1))) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + totalDelegationReward := totalUploaderReward - uploaderPayoutReward + + // divide with 4 because uploader only has 25% of total delegation + uploaderDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).TruncateInt64()) + delegatorDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).Mul(sdk.NewDec(3)).TruncateInt64()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + // assert delegator delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(delegatorDelegationReward)) + + // check voter rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with multiple validators and foreign delegation although some voted abstain", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * 
i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ 
:= s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + totalDelegationReward := totalUploaderReward - uploaderPayoutReward + + // divide with 4 because uploader only has 25% of total delegation + uploaderDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).TruncateInt64()) + delegatorDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).Mul(sdk.NewDec(3)).TruncateInt64()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + // assert delegator delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(delegatorDelegationReward)) + + // check voter rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Produce a valid bundle with multiple validators and foreign delegation although some voted invalid", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 300 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: 
"y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).NotTo(BeEmpty()) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + uploader, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), valaccountUploader.Staker) + + // calculate voter slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetVoteSlash(s.Ctx())) + slashAmountVoter := uint64(sdk.NewDec(int64(200 * i.KYVE)).Mul(fraction).TruncateInt64()) + slashAmountDelegator := uint64(sdk.NewDec(int64(100 * i.KYVE)).Mul(fraction).TruncateInt64()) + + 
Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(Equal(200*i.KYVE - slashAmountVoter)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.BOB)).To(Equal(100*i.KYVE - slashAmountDelegator)) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(400 * i.KYVE)) + + // check voter status + _, valaccountVoterFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoterFound).To(BeFalse()) + + balanceVoterValaddress := s.GetBalanceFromAddress(i.VALADDRESS_1) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(i.STAKER_1) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + commission, _ := sdk.NewDecFromStr(uploader.Commission) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + uploaderPayoutReward := uint64(sdk.NewDec(int64(totalUploaderReward)).Mul(commission).TruncateInt64()) + totalDelegationReward := totalUploaderReward - uploaderPayoutReward + + // divide with 4 because uploader only has 25% of total delegation + uploaderDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).TruncateInt64()) + delegatorDelegationReward := uint64(sdk.NewDec(int64(totalDelegationReward)).Quo(sdk.NewDec(4)).Mul(sdk.NewDec(3)).TruncateInt64()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + // assert delegator delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.ALICE)).To(Equal(delegatorDelegationReward)) + + // check voter rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.BOB)).To(BeZero()) + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + uploaderPayoutReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(uploaderDelegationReward)) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) +}) diff --git a/x/bundles/keeper/keeper_suite_zero_delegation_test.go b/x/bundles/keeper/keeper_suite_zero_delegation_test.go new file mode 100644 index 00000000..88e3ce81 --- /dev/null +++ b/x/bundles/keeper/keeper_suite_zero_delegation_test.go @@ -0,0 +1,786 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - zero delegation + +* Staker votes with zero delegation +* Staker receives vote slash with zero delegation +* Staker submit bundle proposal with zero delegation +* Staker receives upload slash with zero delegation +* Staker receives timeout slash because votes were missed +* Stakers try to produce valid bundle but all stakers have zero delegation + +*/ + +var _ = Describe("valid bundles", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Staker votes with zero delegation", func() { + // ARRANGE + // create normal validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // create zero delegation validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + }) + + It("Staker receives 
vote slash with zero delegation", func() { + // ARRANGE + // create normal validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // create zero delegation validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + + // calculate voter slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetVoteSlash(s.Ctx())) + slashAmountVoter := uint64(sdk.NewDec(int64(0 * i.KYVE)).Mul(fraction).TruncateInt64()) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(Equal(0*i.KYVE - slashAmountVoter)) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100*i.KYVE - slashAmountVoter)) + }) + + It("Staker submit bundle proposal with zero delegation", func() { + // ARRANGE + // create zero delegation validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // create normal validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: 
"test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + s.CommitAfterSeconds(60) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("99")) + Expect(pool.CurrentSummary).To(Equal("test_value")) + Expect(pool.CurrentIndex).To(Equal(uint64(100))) + Expect(pool.TotalBundles).To(Equal(uint64(1))) + + // check if finalized bundle got saved + finalizedBundle, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeTrue()) + + Expect(finalizedBundle.PoolId).To(Equal(uint64(0))) + Expect(finalizedBundle.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(finalizedBundle.Uploader).To(Equal(i.STAKER_0)) + Expect(finalizedBundle.FromIndex).To(Equal(uint64(0))) + Expect(finalizedBundle.ToIndex).To(Equal(uint64(100))) + Expect(finalizedBundle.FromKey).To(Equal("0")) + Expect(finalizedBundle.ToKey).To(Equal("99")) + Expect(finalizedBundle.BundleSummary).To(Equal("test_value")) + Expect(finalizedBundle.DataHash).To(Equal("test_hash")) + Expect(finalizedBundle.FinalizedAt).NotTo(BeZero()) + + // check if next bundle proposal got registered + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash2")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("100")) + Expect(bundleProposal.ToKey).To(Equal("199")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value2")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceUploaderValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceUploaderValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + + // check voter status + 
valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + // calculate uploader rewards + totalReward := 100*s.App().BundlesKeeper.GetStorageCost(s.Ctx()) + pool.OperatingCost + networkFee, _ := sdk.NewDecFromStr(s.App().BundlesKeeper.GetNetworkFee(s.Ctx())) + + treasuryReward := uint64(sdk.NewDec(int64(totalReward)).Mul(networkFee).TruncateInt64()) + totalUploaderReward := totalReward - treasuryReward + + // assert payout transfer + Expect(balanceUploader).To(Equal(initialBalanceStaker0 + totalUploaderReward)) + // assert uploader self delegation rewards + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100*i.KYVE - totalReward)) + }) + + It("Staker receives upload slash with zero delegation", func() { + // ARRANGE + // create zero delegation validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // create normal validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + s.CommitAfterSeconds(60) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash2", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value2", + }) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, 
finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + _, valaccountUploaderFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploaderFound).To(BeFalse()) + + balanceValaddress := s.GetBalanceFromAddress(i.VALADDRESS_0) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(i.STAKER_0) + _, uploaderFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(uploaderFound).To(BeTrue()) + + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // calculate uploader slashes + fraction, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetUploadSlash(s.Ctx())) + slashAmount := uint64(sdk.NewDec(int64(0 * i.KYVE)).Mul(fraction).TruncateInt64()) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(0*i.KYVE - slashAmount)) + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100*i.KYVE - slashAmount)) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) + + It("Staker receives timeout slash because votes were missed", func() { + // ARRANGE + // create normal validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 
0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // create zero delegation validator + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + + // ACT + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) + + for r := 1; r <= maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // ASSERT + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, stakerFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_1) + Expect(stakerFound).To(BeTrue()) + + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + // check if voter got slashed + slashAmountRatio, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetTimeoutSlash(s.Ctx())) + expectedBalance := 0*i.KYVE - uint64(sdk.NewDec(int64(0*i.KYVE)).Mul(slashAmountRatio).RoundInt64()) + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1))) + }) + + It("Stakers try to produce valid bundle but all stakers have zero delegation", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 0 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + + // manually set next staker + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + 
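+ // with every staker at zero delegation the weighted random choice has a total
+ // weight of zero, so no next uploader could be selected and the field stays empty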
Expect(bundleProposal.NextUploader).To(BeEmpty()) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + // check if bundle got not finalized on pool + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + + Expect(pool.CurrentKey).To(Equal("")) + Expect(pool.CurrentSummary).To(BeEmpty()) + Expect(pool.CurrentIndex).To(BeZero()) + Expect(pool.TotalBundles).To(BeZero()) + + // check if finalized bundle exists + _, finalizedBundleFound := s.App().BundlesKeeper.GetFinalizedBundle(s.Ctx(), 0, 0) + Expect(finalizedBundleFound).To(BeFalse()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // check uploader status + valaccountUploader, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccountUploader.Points).To(BeZero()) + + balanceValaddress := s.GetBalanceFromAddress(valaccountUploader.Valaddress) + Expect(balanceValaddress).To(Equal(initialBalanceValaddress0)) + + balanceUploader := s.GetBalanceFromAddress(valaccountUploader.Staker) + + Expect(balanceUploader).To(Equal(initialBalanceStaker0)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(BeZero()) + + // check voter status + valaccountVoter, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountVoter.Points).To(BeZero()) + + balanceVoterValaddress := s.GetBalanceFromAddress(valaccountVoter.Valaddress) + Expect(balanceVoterValaddress).To(Equal(initialBalanceValaddress1)) + + balanceVoter := s.GetBalanceFromAddress(valaccountVoter.Staker) + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + + Expect(balanceVoter).To(Equal(initialBalanceStaker1)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.STAKER_1, i.STAKER_1)).To(BeZero()) + + // check pool funds + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + }) +}) diff --git a/x/bundles/keeper/logic_bundles.go b/x/bundles/keeper/logic_bundles.go new file mode 100644 index 00000000..1c3f56e3 --- /dev/null +++ b/x/bundles/keeper/logic_bundles.go @@ -0,0 +1,544 @@ +package keeper + +import ( + "encoding/binary" + "math/rand" + "sort" + + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// AssertPoolCanRun checks whether the given pool fulfils all +// technical/formal 
requirements to produce bundles +func (k Keeper) AssertPoolCanRun(ctx sdk.Context, poolId uint64) error { + pool, poolErr := k.poolKeeper.GetPoolWithError(ctx, poolId) + if poolErr != nil { + return poolErr + } + + // Error if the pool is upgrading. + if pool.UpgradePlan.ScheduledAt > 0 && uint64(ctx.BlockTime().Unix()) >= pool.UpgradePlan.ScheduledAt { + return types.ErrPoolCurrentlyUpgrading + } + + // Error if the pool is disabled. + if pool.Disabled { + return types.ErrPoolDisabled + } + + // Error if the pool has no funds. + if len(pool.Funders) == 0 { + return types.ErrPoolOutOfFunds + } + + // Error if min delegation is not reached + if k.delegationKeeper.GetDelegationOfPool(ctx, pool.Id) < pool.MinDelegation { + return types.ErrMinDelegationNotReached + } + + return nil +} + +// AssertCanVote checks whether a participant in the network can vote on +// a bundle proposal in a storage pool +func (k Keeper) AssertCanVote(ctx sdk.Context, poolId uint64, staker string, voter string, storageId string) error { + // Check basic pool configs + if err := k.AssertPoolCanRun(ctx, poolId); err != nil { + return err + } + + // Check if sender is a staker in pool + if err := k.stakerKeeper.AssertValaccountAuthorized(ctx, poolId, staker, voter); err != nil { + return err + } + + bundleProposal, _ := k.GetBundleProposal(ctx, poolId) + + // Check if dropped bundle + if bundleProposal.StorageId == "" { + return types.ErrBundleDropped + } + + // Check if tx matches current bundleProposal + if storageId != bundleProposal.StorageId { + return types.ErrInvalidStorageId + } + + // Check if the sender has already voted on the bundle. + hasVotedValid := util.ContainsString(bundleProposal.VotersValid, staker) + hasVotedInvalid := util.ContainsString(bundleProposal.VotersInvalid, staker) + + if hasVotedValid { + return types.ErrAlreadyVotedValid + } + + if hasVotedInvalid { + return types.ErrAlreadyVotedInvalid + } + + return nil +} + +// AssertCanPropose checks whether a participant can submit the next bundle +// proposal in a storage pool +func (k Keeper) AssertCanPropose(ctx sdk.Context, poolId uint64, staker string, proposer string, fromIndex uint64) error { + // Check basic pool configs + if err := k.AssertPoolCanRun(ctx, poolId); err != nil { + return err + } + + // Check if sender is a staker in pool + if err := k.stakerKeeper.AssertValaccountAuthorized(ctx, poolId, staker, proposer); err != nil { + return err + } + + pool, _ := k.poolKeeper.GetPoolWithError(ctx, poolId) + bundleProposal, _ := k.GetBundleProposal(ctx, poolId) + + // Check if designated uploader + if bundleProposal.NextUploader != staker { + return sdkErrors.Wrapf(types.ErrNotDesignatedUploader, "expected %v received %v", bundleProposal.NextUploader, staker) + } + + // Check if upload interval has been surpassed + if uint64(ctx.BlockTime().Unix()) < (bundleProposal.UpdatedAt + pool.UploadInterval) { + return sdkErrors.Wrapf(types.ErrUploadInterval, "expected %v < %v", ctx.BlockTime().Unix(), bundleProposal.UpdatedAt+pool.UploadInterval) + } + + // Check if from_index matches + if pool.CurrentIndex+bundleProposal.BundleSize != fromIndex { + return sdkErrors.Wrapf(types.ErrFromIndex, "expected %v received %v", pool.CurrentIndex+bundleProposal.BundleSize, fromIndex) + } + + return nil +} + +// validateSubmitBundleArgs validates various bundle proposal metadata for correctness and +// fails if at least one requirement is not met +func (k Keeper) validateSubmitBundleArgs(ctx sdk.Context, bundleProposal *types.BundleProposal, msg 
*types.MsgSubmitBundleProposal) error { + pool, err := k.poolKeeper.GetPoolWithError(ctx, msg.PoolId) + if err != nil { + return err + } + + // Validate storage id + if msg.StorageId == "" { + return types.ErrInvalidArgs + } + + // Validate from index + if pool.CurrentIndex+bundleProposal.BundleSize != msg.FromIndex { + return sdkErrors.Wrapf(types.ErrFromIndex, "expected %v received %v", pool.CurrentIndex+bundleProposal.BundleSize, msg.FromIndex) + } + + // Validate if bundle is bigger than zero + if msg.BundleSize == 0 { + return types.ErrInvalidArgs + } + + // Validate if bundle is not too big + if msg.BundleSize > pool.MaxBundleSize { + return sdkErrors.Wrapf(types.ErrMaxBundleSize, "expected %v received %v", pool.MaxBundleSize, msg.BundleSize) + } + + // Validate key values + if msg.FromKey == "" || msg.ToKey == "" { + return types.ErrInvalidArgs + } + + return nil +} + +// slashDelegatorsAndRemoveStaker slashes a staker with a certain slashType and all including +// delegators and removes him from the storage pool +func (k Keeper) slashDelegatorsAndRemoveStaker(ctx sdk.Context, poolId uint64, stakerAddress string, slashType delegationTypes.SlashType) { + k.delegationKeeper.SlashDelegators(ctx, poolId, stakerAddress, slashType) + k.stakerKeeper.LeavePool(ctx, stakerAddress, poolId) +} + +// resetPoints resets the points from a valaccount to zero +func (k Keeper) resetPoints(ctx sdk.Context, poolId uint64, stakerAddress string) { + previousPoints := k.stakerKeeper.ResetPoints(ctx, poolId, stakerAddress) + + // only reset points if valaccount has at least a point + if previousPoints > 0 { + _ = ctx.EventManager().EmitTypedEvent(&types.EventPointsReset{ + PoolId: poolId, + Staker: stakerAddress, + }) + } +} + +// addPoint increases the points of a valaccount with one and automatically +// slashes and removes the staker once he reaches max points +func (k Keeper) addPoint(ctx sdk.Context, poolId uint64, stakerAddress string) { + // Add one point to staker in given pool + points := k.stakerKeeper.IncrementPoints(ctx, poolId, stakerAddress) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventPointIncreased{ + PoolId: poolId, + Staker: stakerAddress, + CurrentPoints: points, + }) + + if points >= k.GetMaxPoints(ctx) { + // slash all delegators with a timeout slash and remove staker from pool. + // points are reset due to the valaccount being deleted while leaving the pool + k.slashDelegatorsAndRemoveStaker(ctx, poolId, stakerAddress, delegationTypes.SLASH_TYPE_TIMEOUT) + } +} + +// handleNonVoters checks if stakers in a pool voted on the current bundle proposal +// if a staker did not vote at all on a bundle proposal he received points +// if a staker receives a certain number of points he receives a timeout slash and gets +// kicked out of a pool +func (k Keeper) handleNonVoters(ctx sdk.Context, poolId uint64) { + voters := map[string]bool{} + bundleProposal, _ := k.GetBundleProposal(ctx, poolId) + + for _, address := range bundleProposal.VotersValid { + voters[address] = true + } + + for _, address := range bundleProposal.VotersInvalid { + voters[address] = true + } + + for _, address := range bundleProposal.VotersAbstain { + voters[address] = true + } + + for _, staker := range k.stakerKeeper.GetAllStakerAddressesOfPool(ctx, poolId) { + if !voters[staker] { + k.addPoint(ctx, poolId, staker) + } + } +} + +// calculatePayouts deducts the network fee from the rewards and splits the remaining amount +// between the staker and its delegators. 
If there are no delegators, the entire amount is +// awarded to the staker. +func (k Keeper) calculatePayouts(ctx sdk.Context, poolId uint64) (bundleReward types.BundleReward) { + pool, _ := k.poolKeeper.GetPoolWithError(ctx, poolId) + bundleProposal, _ := k.GetBundleProposal(ctx, poolId) + + // Should not happen, if so move everything to the treasury + if !k.stakerKeeper.DoesStakerExist(ctx, bundleProposal.Uploader) { + bundleReward.Treasury = bundleReward.Total + + return + } + + // formula for calculating the rewards + bundleReward.Total = pool.OperatingCost + (bundleProposal.DataSize * k.GetStorageCost(ctx)) + + networkFee, err := sdk.NewDecFromStr(k.GetNetworkFee(ctx)) + if err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "Network Fee unparasable - "+k.GetNetworkFee(ctx)) + } + // Add fee to treasury + bundleReward.Treasury = uint64(sdk.NewDec(int64(bundleReward.Total)).Mul(networkFee).RoundInt64()) + + // Remaining rewards to be split between staker and its delegators + totalNodeReward := bundleReward.Total - bundleReward.Treasury + + // Payout delegators + if k.delegationKeeper.GetDelegationAmount(ctx, bundleProposal.Uploader) > 0 { + commission := k.stakerKeeper.GetCommission(ctx, bundleProposal.Uploader) + + bundleReward.Uploader = uint64(sdk.NewDec(int64(totalNodeReward)).Mul(commission).RoundInt64()) + bundleReward.Delegation = totalNodeReward - bundleReward.Uploader + } else { + bundleReward.Uploader = totalNodeReward + bundleReward.Delegation = 0 + } + + return +} + +// registerBundleProposalFromUploader handles the registration of the new bundle proposal +// an uploader has just submitted. With this new bundle proposal other participants +// can vote on it. +func (k Keeper) registerBundleProposalFromUploader(ctx sdk.Context, msg *types.MsgSubmitBundleProposal, nextUploader string) { + pool, _ := k.poolKeeper.GetPool(ctx, msg.PoolId) + + bundleProposal := types.BundleProposal{ + PoolId: msg.PoolId, + Uploader: msg.Staker, + NextUploader: nextUploader, + StorageId: msg.StorageId, + DataSize: msg.DataSize, + BundleSize: msg.BundleSize, + UpdatedAt: uint64(ctx.BlockTime().Unix()), + VotersValid: append(make([]string, 0), msg.Staker), + FromKey: msg.FromKey, + ToKey: msg.ToKey, + BundleSummary: msg.BundleSummary, + DataHash: msg.DataHash, + StorageProviderId: pool.CurrentStorageProviderId, + CompressionId: pool.CurrentCompressionId, + } + + // Emit a vote event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventBundleVote{ + PoolId: msg.PoolId, + Staker: msg.Staker, + StorageId: msg.StorageId, + Vote: types.VOTE_TYPE_VALID, + }) + + k.SetBundleProposal(ctx, bundleProposal) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventBundleProposed{ + PoolId: bundleProposal.PoolId, + Id: pool.TotalBundles, + StorageId: bundleProposal.StorageId, + Uploader: bundleProposal.Uploader, + DataSize: bundleProposal.DataSize, + FromIndex: pool.CurrentIndex, + BundleSize: bundleProposal.BundleSize, + FromKey: bundleProposal.FromKey, + ToKey: bundleProposal.ToKey, + BundleSummary: bundleProposal.BundleSummary, + DataHash: bundleProposal.DataHash, + ProposedAt: uint64(ctx.BlockTime().Unix()), + StorageProviderId: bundleProposal.StorageProviderId, + CompressionId: bundleProposal.CompressionId, + }) +} + +// finalizeCurrentBundleProposal takes the data of the current evaluated proposal +// and stores it as a finalized proposal. This only happens if the network +// reached quorum on the proposal's validity. 
+
+// registerBundleProposalFromUploader handles the registration of the new bundle proposal
+// an uploader has just submitted. Other participants can then vote on it.
+func (k Keeper) registerBundleProposalFromUploader(ctx sdk.Context, msg *types.MsgSubmitBundleProposal, nextUploader string) {
+	pool, _ := k.poolKeeper.GetPool(ctx, msg.PoolId)
+
+	bundleProposal := types.BundleProposal{
+		PoolId: msg.PoolId,
+		Uploader: msg.Staker,
+		NextUploader: nextUploader,
+		StorageId: msg.StorageId,
+		DataSize: msg.DataSize,
+		BundleSize: msg.BundleSize,
+		UpdatedAt: uint64(ctx.BlockTime().Unix()),
+		VotersValid: append(make([]string, 0), msg.Staker),
+		FromKey: msg.FromKey,
+		ToKey: msg.ToKey,
+		BundleSummary: msg.BundleSummary,
+		DataHash: msg.DataHash,
+		StorageProviderId: pool.CurrentStorageProviderId,
+		CompressionId: pool.CurrentCompressionId,
+	}
+
+	// Emit a vote event; the uploader implicitly votes valid on its own proposal.
+	_ = ctx.EventManager().EmitTypedEvent(&types.EventBundleVote{
+		PoolId: msg.PoolId,
+		Staker: msg.Staker,
+		StorageId: msg.StorageId,
+		Vote: types.VOTE_TYPE_VALID,
+	})
+
+	k.SetBundleProposal(ctx, bundleProposal)
+
+	_ = ctx.EventManager().EmitTypedEvent(&types.EventBundleProposed{
+		PoolId: bundleProposal.PoolId,
+		Id: pool.TotalBundles,
+		StorageId: bundleProposal.StorageId,
+		Uploader: bundleProposal.Uploader,
+		DataSize: bundleProposal.DataSize,
+		FromIndex: pool.CurrentIndex,
+		BundleSize: bundleProposal.BundleSize,
+		FromKey: bundleProposal.FromKey,
+		ToKey: bundleProposal.ToKey,
+		BundleSummary: bundleProposal.BundleSummary,
+		DataHash: bundleProposal.DataHash,
+		ProposedAt: uint64(ctx.BlockTime().Unix()),
+		StorageProviderId: bundleProposal.StorageProviderId,
+		CompressionId: bundleProposal.CompressionId,
+	})
+}
+
+// finalizeCurrentBundleProposal takes the data of the currently evaluated proposal
+// and stores it as a finalized proposal. This only happens if the network
+// reached quorum on the proposal's validity.
+func (k Keeper) finalizeCurrentBundleProposal(ctx sdk.Context, poolId uint64, voteDistribution types.VoteDistribution, bundleReward types.BundleReward, nextUploader string) {
+	pool, _ := k.poolKeeper.GetPool(ctx, poolId)
+	bundleProposal, _ := k.GetBundleProposal(ctx, poolId)
+
+	// save finalized bundle
+	finalizedBundle := types.FinalizedBundle{
+		StorageId: bundleProposal.StorageId,
+		PoolId: pool.Id,
+		Id: pool.TotalBundles,
+		Uploader: bundleProposal.Uploader,
+		FromIndex: pool.CurrentIndex,
+		ToIndex: pool.CurrentIndex + bundleProposal.BundleSize,
+		FinalizedAt: uint64(ctx.BlockHeight()),
+		FromKey: bundleProposal.FromKey,
+		ToKey: bundleProposal.ToKey,
+		BundleSummary: bundleProposal.BundleSummary,
+		DataHash: bundleProposal.DataHash,
+		StorageProviderId: bundleProposal.StorageProviderId,
+		CompressionId: bundleProposal.CompressionId,
+	}
+
+	k.SetFinalizedBundle(ctx, finalizedBundle)
+
+	_ = ctx.EventManager().EmitTypedEvent(&types.EventBundleFinalized{
+		PoolId: finalizedBundle.PoolId,
+		Id: finalizedBundle.Id,
+		Valid: voteDistribution.Valid,
+		Invalid: voteDistribution.Invalid,
+		Abstain: voteDistribution.Abstain,
+		Total: voteDistribution.Total,
+		Status: voteDistribution.Status,
+		RewardTreasury: bundleReward.Treasury,
+		RewardUploader: bundleReward.Uploader,
+		RewardDelegation: bundleReward.Delegation,
+		RewardTotal: bundleReward.Total,
+		FinalizedAt: uint64(ctx.BlockTime().Unix()),
+		Uploader: bundleProposal.Uploader,
+		NextUploader: nextUploader,
+	})
+
+	// Finalize the proposal, saving useful information.
+	k.poolKeeper.IncrementBundleInformation(ctx, pool.Id, pool.CurrentIndex+bundleProposal.BundleSize, bundleProposal.ToKey, bundleProposal.BundleSummary)
+}
+
+// dropCurrentBundleProposal removes the current proposal because it did not reach
+// the required quorum on the validity of the data. When the proposal is dropped,
+// the same next uploader as before may submit again, since it is not their fault
+// that the last proposal did not reach a quorum.
+func (k Keeper) dropCurrentBundleProposal(
+	ctx sdk.Context,
+	poolId uint64,
+	voteDistribution types.VoteDistribution,
+	nextUploader string,
+) {
+	pool, _ := k.poolKeeper.GetPool(ctx, poolId)
+	bundleProposal, _ := k.GetBundleProposal(ctx, poolId)
+
+	_ = ctx.EventManager().EmitTypedEvent(&types.EventBundleFinalized{
+		PoolId: pool.Id,
+		Id: pool.TotalBundles,
+		Valid: voteDistribution.Valid,
+		Invalid: voteDistribution.Invalid,
+		Abstain: voteDistribution.Abstain,
+		Total: voteDistribution.Total,
+		Status: voteDistribution.Status,
+		RewardTreasury: 0,
+		RewardUploader: 0,
+		RewardDelegation: 0,
+		RewardTotal: 0,
+		FinalizedAt: uint64(ctx.BlockTime().Unix()),
+		Uploader: bundleProposal.Uploader,
+	})
+
+	// drop bundle
+	bundleProposal = types.BundleProposal{
+		PoolId: pool.Id,
+		NextUploader: nextUploader,
+		UpdatedAt: uint64(ctx.BlockTime().Unix()),
+	}
+
+	k.SetBundleProposal(ctx, bundleProposal)
+}
+
+// calculateVotingPower calculates the voting power one staker has in a
+// storage pool, based solely on the staker's total delegation
+func (k Keeper) calculateVotingPower(delegation uint64) (votingPower uint64) {
+	// voting power is linear
+	votingPower = delegation
+	return
+}
+
+// RandomChoiceCandidate holds the voting power of a candidate for the
+// next uploader selection
+type RandomChoiceCandidate struct {
+	Account string
+	VotingPower uint64
+}
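The next uploader selection, implemented in getWeightedRandomChoice below, picks one candidate with probability proportional to its voting power. The following standalone sketch shows the underlying cumulative-weight walk in isolation; it is illustrative only (simple parallel slices instead of the keeper's sorted insert, made-up sample values) and not code from this patch.

package main

import (
	"fmt"
	"math/rand"
)

// weightedChoice returns one account with probability weight/totalWeight.
// The deterministic seed makes every node arrive at the same result.
func weightedChoice(accounts []string, weights []uint64, seed int64) string {
	var total uint64
	for _, w := range weights {
		total += w
	}
	if total == 0 {
		return ""
	}

	// draw a value in [0, total) and walk the cumulative weights
	value := rand.New(rand.NewSource(seed)).Uint64() % total
	for i, w := range weights {
		if value < w {
			return accounts[i]
		}
		value -= w
	}
	return ""
}

func main() {
	// hypothetical stakers with 100, 200 and 700 units of delegation
	fmt.Println(weightedChoice([]string{"kyve1aaa", "kyve1bbb", "kyve1ccc"}, []uint64{100, 200, 700}, 42))
}

The keeper's implementation additionally keeps the weight list sorted while it is being built; that changes the iteration order but not the selection probabilities.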
+
+// getWeightedRandomChoice is an internal function that returns a weighted random
+// selection out of a list of candidates based on their voting power.
+func (k Keeper) getWeightedRandomChoice(candidates []RandomChoiceCandidate, seed int64) string {
+	type WeightedRandomChoice struct {
+		Elements []string
+		Weights []uint64
+		TotalWeight uint64
+	}
+
+	wrc := WeightedRandomChoice{}
+
+	for _, candidate := range candidates {
+		i := sort.Search(len(wrc.Weights), func(i int) bool { return wrc.Weights[i] > candidate.VotingPower })
+		wrc.Weights = append(wrc.Weights, 0)
+		wrc.Elements = append(wrc.Elements, "")
+		copy(wrc.Weights[i+1:], wrc.Weights[i:])
+		copy(wrc.Elements[i+1:], wrc.Elements[i:])
+		wrc.Weights[i] = candidate.VotingPower
+		wrc.Elements[i] = candidate.Account
+		wrc.TotalWeight += candidate.VotingPower
+	}
+
+	if wrc.TotalWeight == 0 {
+		return ""
+	}
+
+	value := rand.New(rand.NewSource(seed)).Uint64() % wrc.TotalWeight
+
+	for key, weight := range wrc.Weights {
+		if weight > value {
+			return wrc.Elements[key]
+		}
+
+		value -= weight
+	}
+
+	return ""
+}
+
+// chooseNextUploaderFromSelectedStakers selects the next uploader based on a
+// fixed set of stakers in a pool. The selection is deterministic for a given
+// block; if no candidate has any voting power, an empty string is returned.
+func (k Keeper) chooseNextUploaderFromSelectedStakers(ctx sdk.Context, poolId uint64, addresses []string) (nextUploader string) {
+	var candidates []RandomChoiceCandidate
+
+	if len(addresses) == 0 {
+		return ""
+	}
+
+	for _, s := range addresses {
+		if k.stakerKeeper.DoesValaccountExist(ctx, poolId, s) {
+			delegation := k.delegationKeeper.GetDelegationAmount(ctx, s)
+
+			candidates = append(candidates, RandomChoiceCandidate{
+				Account: s,
+				VotingPower: k.calculateVotingPower(delegation),
+			})
+		}
+	}
+
+	seed := int64(binary.BigEndian.Uint64(ctx.BlockHeader().AppHash))
+	return k.getWeightedRandomChoice(candidates, seed)
+}
+
+// chooseNextUploaderFromAllStakers selects the next uploader based on all
+// stakers in a pool. The selection is deterministic for a given block.
+func (k Keeper) chooseNextUploaderFromAllStakers(ctx sdk.Context, poolId uint64) (nextUploader string) {
+	stakers := k.stakerKeeper.GetAllStakerAddressesOfPool(ctx, poolId)
+	return k.chooseNextUploaderFromSelectedStakers(ctx, poolId, stakers)
+}
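GetVoteDistribution below tallies the delegation-weighted votes and derives the proposal status. The decision rule — valid needs strictly more than half of the total voting power, invalid needs at least half, everything else is no quorum — can be seen in isolation in this small sketch; it is standalone and illustrative, and the returned strings are stand-ins for the module's BUNDLE_STATUS_* constants.

package main

import "fmt"

// quorumStatus mirrors the threshold checks in GetVoteDistribution:
// a valid quorum requires > 50% of total voting power, an invalid quorum >= 50%.
func quorumStatus(valid, invalid, total uint64) string {
	switch {
	case total == 0:
		return "NO_QUORUM"
	case valid*2 > total:
		return "VALID"
	case invalid*2 >= total:
		return "INVALID"
	default:
		return "NO_QUORUM"
	}
}

func main() {
	// 51% valid reaches quorum, 50% invalid reaches quorum, a 40/40 split does not
	fmt.Println(quorumStatus(51, 10, 100), quorumStatus(10, 50, 100), quorumStatus(40, 40, 100))
}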
+
+// GetVoteDistribution is an internal function that evaluates the quorum status
+// based on the voting power of the current bundle proposal.
+func (k Keeper) GetVoteDistribution(ctx sdk.Context, poolId uint64) (voteDistribution types.VoteDistribution) {
+	bundleProposal, found := k.GetBundleProposal(ctx, poolId)
+	if !found {
+		return
+	}
+
+	// get voting power for valid
+	for _, voter := range bundleProposal.VotersValid {
+		// if the valaccount exists, the voter is still active in the pool
+		if k.stakerKeeper.DoesValaccountExist(ctx, poolId, voter) {
+			delegation := k.delegationKeeper.GetDelegationAmount(ctx, voter)
+			voteDistribution.Valid += k.calculateVotingPower(delegation)
+		}
+	}
+
+	// get voting power for invalid
+	for _, voter := range bundleProposal.VotersInvalid {
+		// if the valaccount exists, the voter is still active in the pool
+		if k.stakerKeeper.DoesValaccountExist(ctx, poolId, voter) {
+			delegation := k.delegationKeeper.GetDelegationAmount(ctx, voter)
+			voteDistribution.Invalid += k.calculateVotingPower(delegation)
+		}
+	}
+
+	// get voting power for abstain
+	for _, voter := range bundleProposal.VotersAbstain {
+		// if the valaccount exists, the voter is still active in the pool
+		if k.stakerKeeper.DoesValaccountExist(ctx, poolId, voter) {
+			delegation := k.delegationKeeper.GetDelegationAmount(ctx, voter)
+			voteDistribution.Abstain += k.calculateVotingPower(delegation)
+		}
+	}
+
+	// get total voting power
+	for _, staker := range k.stakerKeeper.GetAllStakerAddressesOfPool(ctx, poolId) {
+		delegation := k.delegationKeeper.GetDelegationAmount(ctx, staker)
+		voteDistribution.Total += k.calculateVotingPower(delegation)
+	}
+
+	if voteDistribution.Total == 0 {
+		// if the total voting power is zero, no quorum can be reached
+		voteDistribution.Status = types.BUNDLE_STATUS_NO_QUORUM
+	} else if voteDistribution.Valid*2 > voteDistribution.Total {
+		// if more than 50% voted valid, quorum is reached
+		voteDistribution.Status = types.BUNDLE_STATUS_VALID
+	} else if voteDistribution.Invalid*2 >= voteDistribution.Total {
+		// if 50% or more voted invalid, quorum is reached
+		voteDistribution.Status = types.BUNDLE_STATUS_INVALID
+	} else {
+		// if neither valid nor invalid reached the threshold, no quorum was reached
+		voteDistribution.Status = types.BUNDLE_STATUS_NO_QUORUM
+	}
+
+	return
+}
diff --git a/x/bundles/keeper/logic_bundles_test.go b/x/bundles/keeper/logic_bundles_test.go
new file mode 100644
index 00000000..5cfa9b49
--- /dev/null
+++ b/x/bundles/keeper/logic_bundles_test.go
@@ -0,0 +1,982 @@
+package keeper_test
+
+import (
+	i "github.com/KYVENetwork/chain/testutil/integration"
+	bundlesTypes "github.com/KYVENetwork/chain/x/bundles/types"
+	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
+	stakertypes "github.com/KYVENetwork/chain/x/stakers/types"
+	. "github.com/onsi/ginkgo/v2"
+	.
"github.com/onsi/gomega" +) + +/* + +TEST CASES - logic_bundles.go + +* Assert pool can run while pool is upgrading +* Assert pool can run while pool is disabled +* Assert pool can run while pool has no funds +* Assert pool can run while min delegation is not reached +* Assert pool can run + +* Assert can vote if sender is no staker +* Assert can vote if bundle is dropped +* Assert can vote if storage id does not match +* Assert can vote if sender has already voted valid +* Assert can vote if sender has already voted invalid +* Assert can vote + +* Assert can propose if sender is no staker +* Assert can propose if sender is not next uploader +* Assert can propose if upload interval has not passed +* Assert can propose if index does not match +* Assert can propose + +*/ + +var _ = Describe("logic_bundles.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + // ASSERT POOL CAN RUN + + It("Assert pool can run while pool is upgrading", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 60, + }, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + // ACT + err := s.App().BundlesKeeper.AssertPoolCanRun(s.Ctx(), 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert pool can run while pool is disabled", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Disabled: true, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ACT + err := s.App().BundlesKeeper.AssertPoolCanRun(s.Ctx(), 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert pool can run while pool has no funds", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + // ACT + err := 
s.App().BundlesKeeper.AssertPoolCanRun(s.Ctx(), 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert pool can run while min delegation is not reached", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 99 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + // ACT + err := s.App().BundlesKeeper.AssertPoolCanRun(s.Ctx(), 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert pool can run", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + // ACT + err := s.App().BundlesKeeper.AssertPoolCanRun(s.Ctx(), 0) + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + }) + + // ASSERT CAN VOTE + + It("Assert can vote if sender is no staker", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can vote if bundle is 
dropped", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(1) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can vote if storage id does not match", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "another_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can vote if sender has already voted 
valid", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: 1, + }) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can vote if sender has already voted invalid", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + 
PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: 2, + }) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can vote", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.CommitAfterSeconds(60) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + // ACT + err := s.App().BundlesKeeper.AssertCanVote(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI") + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + }) + + // ASSERT CAN PROPOSE + + It("Assert can propose if sender is no staker", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + // ACT + err := s.App().BundlesKeeper.AssertCanPropose(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can propose if sender is not next uploader", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + 
s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.CommitAfterSeconds(60) + + // ACT + err := s.App().BundlesKeeper.AssertCanPropose(s.Ctx(), 0, i.STAKER_1, i.VALADDRESS_1, 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can propose if upload interval has not passed", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(30) + + // ACT + err := s.App().BundlesKeeper.AssertCanPropose(s.Ctx(), 0, i.STAKER_0, i.VALADDRESS_0, 0) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can propose if index does not match", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + // ACT + err := s.App().BundlesKeeper.AssertCanPropose(s.Ctx(), 0, i.STAKER_0, i.VALADDRESS_0, 1000) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Assert can propose", func() { + // ASSERT + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + UploadInterval: 60, + OperatingCost: 2 * i.KYVE, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: 
i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundlesTypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + // ACT + err := s.App().BundlesKeeper.AssertCanPropose(s.Ctx(), 0, i.STAKER_0, i.VALADDRESS_0, 0) + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/x/bundles/keeper/logic_end_block_handle_upload_timeout.go b/x/bundles/keeper/logic_end_block_handle_upload_timeout.go new file mode 100644 index 00000000..ca5d1e3c --- /dev/null +++ b/x/bundles/keeper/logic_end_block_handle_upload_timeout.go @@ -0,0 +1,84 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// HandleUploadTimeout is an end block hook that triggers an upload timeout for every pool (if applicable). +func (k Keeper) HandleUploadTimeout(goCtx context.Context) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Iterate over all pool Ids. + for _, pool := range k.poolKeeper.GetAllPools(ctx) { + err := k.AssertPoolCanRun(ctx, pool.Id) + bundleProposal, _ := k.GetBundleProposal(ctx, pool.Id) + + // Check if pool is active + if err != nil { + // if pool was disabled we drop the current bundle. We only drop + // if there is an ongoing bundle proposal. Else we just remove the next + // uploader + if err == types.ErrPoolDisabled && bundleProposal.StorageId != "" { + k.dropCurrentBundleProposal(ctx, pool.Id, types.VoteDistribution{ + Valid: 0, + Invalid: 0, + Abstain: 0, + Total: 0, + Status: types.BUNDLE_STATUS_DISABLED, + }, "") + } else if bundleProposal.NextUploader != "" { + bundleProposal.NextUploader = "" + k.SetBundleProposal(ctx, bundleProposal) + } + + // since a paused or disabled pool can not produce any bundles + // we continue because timeout slashes don't apply in this case + continue + } + + // Skip if we haven't reached the upload interval. + if uint64(ctx.BlockTime().Unix()) < (bundleProposal.UpdatedAt + pool.UploadInterval) { + continue + } + + // Check if bundle needs to be dropped + if bundleProposal.StorageId != "" { + // check if the quorum was actually reached + voteDistribution := k.GetVoteDistribution(ctx, pool.Id) + + if voteDistribution.Status == types.BUNDLE_STATUS_NO_QUORUM { + // handle stakers who did not vote at all + k.handleNonVoters(ctx, pool.Id) + + // Get next uploader from all pool stakers + nextUploader := k.chooseNextUploaderFromAllStakers(ctx, pool.Id) + + // If consensus wasn't reached, we drop the bundle and emit an event. + k.dropCurrentBundleProposal(ctx, pool.Id, voteDistribution, nextUploader) + continue + } + } + + // Skip if we haven't reached the upload timeout. + if uint64(ctx.BlockTime().Unix()) < (bundleProposal.UpdatedAt + pool.UploadInterval + k.GetUploadTimeout(ctx)) { + continue + } + + // We now know that the pool is active and the upload timeout has been reached. + + // Now we increase the points of the valaccount + // (if he is still participating in the pool) and select a new one. 
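+	// Note: a missed upload only costs one point here; the actual timeout slash is
+	// applied inside addPoint once the valaccount accumulates GetMaxPoints points.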
+ if k.stakerKeeper.DoesValaccountExist(ctx, pool.Id, bundleProposal.NextUploader) { + k.addPoint(ctx, pool.Id, bundleProposal.NextUploader) + } + + // Update bundle proposal and choose next uploader + bundleProposal.NextUploader = k.chooseNextUploaderFromAllStakers(ctx, pool.Id) + bundleProposal.UpdatedAt = uint64(ctx.BlockTime().Unix()) + + k.SetBundleProposal(ctx, bundleProposal) + } +} diff --git a/x/bundles/keeper/logic_end_block_handle_upload_timeout_test.go b/x/bundles/keeper/logic_end_block_handle_upload_timeout_test.go new file mode 100644 index 00000000..40a8ee93 --- /dev/null +++ b/x/bundles/keeper/logic_end_block_handle_upload_timeout_test.go @@ -0,0 +1,994 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - logic_end_block_handle_upload_timeout.go + +* First staker who joins gets automatically chosen as next uploader +* Next uploader gets removed due to pool upgrading +* Next uploader gets removed due to pool being disabled +* Next uploader gets removed due to pool having no funds +* Next uploader gets removed due to pool not reaching min stake +* Staker is next uploader of genesis bundle and upload interval and timeout does not pass +* Staker is next uploader of genesis bundle and upload timeout does not pass but upload interval passes +* Staker is next uploader of genesis bundle and upload timeout does pass together with upload interval +* Staker is next uploader of bundle proposal and upload interval does not pass +* Staker is next uploader of bundle proposal and upload timeout does not pass +* Staker is next uploader of bundle proposal and upload timeout passes +* Staker with already max points is next uploader of bundle proposal and upload timeout passes +* A bundle proposal with no quorum does not reach the upload interval +* A bundle proposal with no quorum does reach the upload interval +* Staker who just left the pool is next uploader of dropped bundle proposal and upload timeout passes +* Staker who just left the pool is next uploader of valid bundle proposal and upload timeout passes +* Staker who just left the pool is next uploader of invalid bundle proposal and upload timeout passes +* Staker with already max points is next uploader of bundle proposal in a second pool and upload timeout passes + +*/ + +var _ = Describe("logic_end_block_handle_upload_timeout.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + MinDelegation: 100 * i.KYVE, + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: 
i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("First staker who joins gets automatically chosen as next uploader", func() { + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Next uploader gets removed due to pool upgrading", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + pool.UpgradePlan = &pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 3600, + } + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Next uploader gets removed due to pool being disabled", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + pool.Disabled = true + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Next uploader gets removed due to pool having no funds", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Next uploader gets 
removed due to pool not reaching min stake", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgUndelegate{ + Creator: i.STAKER_0, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx())) + s.CommitAfterSeconds(1) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(50 * i.KYVE)) + }) + + It("Staker is next uploader of genesis bundle and upload interval and timeout does not pass", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker is next uploader of genesis bundle and upload timeout does not pass but upload interval passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ACT + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker is next uploader of genesis bundle and upload timeout does pass together with upload interval", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + // check if next uploader got not removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + // check if next uploader received a point + valaccount, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + 
Expect(valaccount.Points).To(Equal(uint64(1))) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + expectedBalance := 100 * i.KYVE + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0))) + }) + + It("Staker is next uploader of bundle proposal and upload interval does not pass", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker is next uploader of bundle proposal and upload timeout does not pass", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ACT + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker is next uploader of bundle proposal and upload timeout passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + 
s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + // check if next uploader got not removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + // check if next uploader received a point + valaccount, _ := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + Expect(valaccount.Points).To(Equal(uint64(1))) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + expectedBalance := 100 * i.KYVE + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0))) + }) + + It("Staker with already max points is next uploader of bundle proposal and upload timeout passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) - 1 + + for r := 1; r <= maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_0 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // overwrite next uploader with staker_1 + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + + // check if next uploader got not removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + // check if next uploader received a point + _, valaccountFound := 
s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + Expect(valaccountFound).To(BeFalse()) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_1) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + slashAmountRatio, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetTimeoutSlash(s.Ctx())) + expectedBalance := 50*i.KYVE - uint64(sdk.NewDec(int64(50*i.KYVE)).Mul(slashAmountRatio).RoundInt64()) + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, i.STAKER_1))) + }) + + It("A bundle proposal with no quorum does not reach the upload interval", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ACT + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(2)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("A bundle proposal with no quorum does reach the upload interval", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ACT + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + 
Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(2)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Staker who just left the pool is next uploader of dropped bundle proposal and upload timeout passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // remove valaccount directly from pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + // ACT + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + // check if next uploader got removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + expectedBalance := 100 * i.KYVE + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0))) + }) + + It("Staker who just left the pool is next uploader of valid bundle proposal and upload timeout passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // remove valaccount directly from pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + 
// ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + // check if next uploader got removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + expectedBalance := 100 * i.KYVE + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0))) + }) + + It("Staker who just left the pool is next uploader of invalid bundle proposal and upload timeout passes", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + // remove valaccount directly from pool + s.App().StakersKeeper.RemoveValaccountFromPool(s.Ctx(), 0, i.STAKER_0) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + // check if next uploader got removed from pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0) + Expect(poolStakers).To(HaveLen(1)) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal(100 * i.KYVE)) + + // check if next uploader not got slashed + expectedBalance := 100 * i.KYVE + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0))) + }) + + It("Staker with already max points is next uploader of bundle proposal in a second pool and upload timeout passes", func() { + // ARRANGE + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest2", + MaxBundleSize: 100, + StartKey: "0", + MinDelegation: 100 * i.KYVE, + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 1, + Amount: 100 * 
i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 1, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 1, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 1, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_2, + Amount: 50 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_2, + PoolId: 1, + Valaddress: i.VALADDRESS_2, + }) + + s.CommitAfterSeconds(60) + + maxPoints := int(s.App().BundlesKeeper.GetMaxPoints(s.Ctx())) - 1 + + for r := 1; r <= maxPoints; r++ { + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 1, + StorageId: "P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg", + DataSize: 100, + DataHash: "test_hash", + FromIndex: uint64(r * 100), + BundleSize: 100, + FromKey: "test_key", + ToKey: "test_key", + BundleSummary: "test_value", + }) + + // overwrite next uploader for test purposes + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 1) + bundleProposal.NextUploader = i.STAKER_1 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + s.CommitAfterSeconds(60) + + // do not vote + } + + // overwrite next uploader with staker_2 + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 1) + bundleProposal.NextUploader = i.STAKER_2 + s.App().BundlesKeeper.SetBundleProposal(s.Ctx(), bundleProposal) + + // ACT + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // ASSERT + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 1) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + Expect(bundleProposal.StorageId).To(Equal("P9edn0bjEfMU_lecFDIPLvGO2v2ltpFNUMWp5kgPddg")) + + // check that only one staker is left in the pool + poolStakers := s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 1) + Expect(poolStakers).To(HaveLen(1)) + + // check that the next uploader's valaccount got removed after reaching max points + _, valaccountFound := s.App().StakersKeeper.GetValaccount(s.Ctx(), 1, i.STAKER_2) + Expect(valaccountFound).To(BeFalse()) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_2) + Expect(found).To(BeTrue()) + + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 1)).To(Equal(100 * i.KYVE)) + + // check that the next uploader got slashed with a timeout slash + slashAmountRatio, _ := sdk.NewDecFromStr(s.App().DelegationKeeper.GetTimeoutSlash(s.Ctx())) + expectedBalance := 50*i.KYVE - uint64(sdk.NewDec(int64(50*i.KYVE)).Mul(slashAmountRatio).RoundInt64()) + + Expect(expectedBalance).To(Equal(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_2, i.STAKER_2))) + }) +}) diff --git a/x/bundles/keeper/msg_server.go b/x/bundles/keeper/msg_server.go new file mode 100644 index 00000000..9b9959d3 --- /dev/null +++ b/x/bundles/keeper/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/bundles/types" +) + +type 
msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/bundles/keeper/msg_server_claim_uploader_role.go b/x/bundles/keeper/msg_server_claim_uploader_role.go new file mode 100644 index 00000000..f6b3ccde --- /dev/null +++ b/x/bundles/keeper/msg_server_claim_uploader_role.go @@ -0,0 +1,60 @@ +package keeper + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/KYVENetwork/chain/x/bundles/types" +) + +// ClaimUploaderRole handles the logic of an SDK message that allows protocol nodes to claim the uploader role. +// Note that this function can only be called when the next uploader is not chosen yet. +// This function obeys "first come, first served" mentality. +func (k msgServer) ClaimUploaderRole( + goCtx context.Context, msg *types.MsgClaimUploaderRole, +) (*types.MsgClaimUploaderRoleResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Assert + + if poolErr := k.AssertPoolCanRun(ctx, msg.PoolId); poolErr != nil { + return nil, poolErr + } + + if err := k.stakerKeeper.AssertValaccountAuthorized(ctx, msg.PoolId, msg.Staker, msg.Creator); err != nil { + return nil, err + } + + // Update bundle proposal + + bundleProposal, found := k.GetBundleProposal(ctx, msg.PoolId) + + // If the pool was newly created no bundle proposal exists yet. + // There is one bundle proposal per pool. + if !found { + bundleProposal.PoolId = msg.PoolId + } + + // Error if the next uploader is already set. + if bundleProposal.NextUploader != "" { + return nil, sdkErrors.Wrap(sdkErrors.ErrUnauthorized, types.ErrUploaderAlreadyClaimed.Error()) + } + + bundleProposal.NextUploader = msg.Staker + bundleProposal.UpdatedAt = uint64(ctx.BlockTime().Unix()) + + k.SetBundleProposal(ctx, bundleProposal) + + // Emit event + + pool, _ := k.poolKeeper.GetPool(ctx, msg.PoolId) + _ = ctx.EventManager().EmitTypedEvent(&types.EventClaimedUploaderRole{ + PoolId: msg.PoolId, + Id: pool.TotalBundles, + NewUploader: bundleProposal.NextUploader, + }) + + return &types.MsgClaimUploaderRoleResponse{}, nil +} diff --git a/x/bundles/keeper/msg_server_claim_uploader_role_test.go b/x/bundles/keeper/msg_server_claim_uploader_role_test.go new file mode 100644 index 00000000..a2c96a92 --- /dev/null +++ b/x/bundles/keeper/msg_server_claim_uploader_role_test.go @@ -0,0 +1,273 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_claim_uploader_role.go + +* Try to claim uploader role without pool being funded +* Try to claim uploader role without being a staker +* Try to claim uploader role if the next uploader is not set yet +* Try to claim uploader role with non-existing valaccount +* Try to claim uploader role with valaccount that belongs to another pool + +*/ + +var _ = Describe("msg_server_claim_uploader_role.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Try to claim uploader role without pool being funded", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + _, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeFalse()) + }) + + It("Try to claim uploader role without being a staker", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + _, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeFalse()) + }) + + It("Try to claim uploader role if the next uploader is not set yet", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + 
Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) + + It("Try to claim uploader role with non existing valaccount", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + _, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeFalse()) + }) + + It("Try to claim uploader role with valaccount that belongs to another pool", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest2", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_0, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + _, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeFalse()) + }) + + It("Try to claim uploader role if someone else is already next uploader", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) +}) diff --git a/x/bundles/keeper/msg_server_skip_uploader_role.go b/x/bundles/keeper/msg_server_skip_uploader_role.go 
new file mode 100644 index 00000000..de9a0db4 --- /dev/null +++ b/x/bundles/keeper/msg_server_skip_uploader_role.go @@ -0,0 +1,56 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SkipUploaderRole handles the logic of an SDK message that allows protocol nodes to skip an upload. +func (k msgServer) SkipUploaderRole( + goCtx context.Context, msg *types.MsgSkipUploaderRole, +) (*types.MsgSkipUploaderRoleResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if err := k.AssertCanPropose(ctx, msg.PoolId, msg.Staker, msg.Creator, msg.FromIndex); err != nil { + return nil, err + } + + pool, _ := k.poolKeeper.GetPool(ctx, msg.PoolId) + bundleProposal, _ := k.GetBundleProposal(ctx, msg.PoolId) + + // reset points of the skipping staker as the node has proven to be active + k.resetPoints(ctx, msg.PoolId, msg.Staker) + + // Get the next uploader from the remaining stakers in the pool + stakers := make([]string, 0) + nextUploader := "" + + // exclude the staker who skips the uploader role + for _, staker := range k.stakerKeeper.GetAllStakerAddressesOfPool(ctx, msg.PoolId) { + if staker != msg.Staker { + stakers = append(stakers, staker) + } + } + + if len(stakers) > 0 { + nextUploader = k.chooseNextUploaderFromSelectedStakers(ctx, msg.PoolId, stakers) + } else { + nextUploader = k.chooseNextUploaderFromAllStakers(ctx, msg.PoolId) + } + + bundleProposal.NextUploader = nextUploader + bundleProposal.UpdatedAt = uint64(ctx.BlockTime().Unix()) + + k.SetBundleProposal(ctx, bundleProposal) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventSkippedUploaderRole{ + PoolId: msg.PoolId, + Id: pool.TotalBundles, + PreviousUploader: msg.Staker, + NewUploader: nextUploader, + }) + + return &types.MsgSkipUploaderRoleResponse{}, nil +} diff --git a/x/bundles/keeper/msg_server_skip_uploader_role_test.go b/x/bundles/keeper/msg_server_skip_uploader_role_test.go new file mode 100644 index 00000000..d86e01c2 --- /dev/null +++ b/x/bundles/keeper/msg_server_skip_uploader_role_test.go @@ -0,0 +1,269 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_skip_uploader_role.go + +* Skip uploader role on data bundle if staker is next uploader +* Skip uploader on data bundle after uploader role has already been skipped +* Skip uploader on data bundle if staker is the only staker in pool +* Skip uploader role on dropped bundle + +*/ + +var _ = Describe("msg_server_skip_uploader_role.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Skip uploader role on data bundle if staker is next uploader", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + FromIndex: 100, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // here the next uploader should be always be different after skipping + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_1)) + }) + + It("Skip uploader on data bundle after uploader role has already been skipped", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + 
s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + FromIndex: 100, + }) + + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + FromIndex: 100, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // here the next uploader should be always be different after skipping + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + }) + + It("Skip uploader on data bundle if staker is the only staker in pool", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + FromIndex: 100, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + }) + + It("Skip uploader role on dropped bundle", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 200 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // create dropped bundle + s.CommitAfterSeconds(60) + s.CommitAfterSeconds(1) + + // wait for upload interval + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSkipUploaderRole{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + FromIndex: 0, + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + 
Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + + // the next uploader should always be different after skipping + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + }) +}) diff --git a/x/bundles/keeper/msg_server_submit_bundle_proposal.go b/x/bundles/keeper/msg_server_submit_bundle_proposal.go new file mode 100644 index 00000000..f72efe04 --- /dev/null +++ b/x/bundles/keeper/msg_server_submit_bundle_proposal.go @@ -0,0 +1,158 @@ +package keeper + +import ( + "context" + + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SubmitBundleProposal handles the logic of an SDK message that allows protocol nodes to submit a new bundle proposal. +func (k msgServer) SubmitBundleProposal( + goCtx context.Context, msg *types.MsgSubmitBundleProposal, +) (*types.MsgSubmitBundleProposalResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if err := k.AssertCanPropose(ctx, msg.PoolId, msg.Staker, msg.Creator, msg.FromIndex); err != nil { + return nil, err + } + + bundleProposal, _ := k.GetBundleProposal(ctx, msg.PoolId) + + // Validate submit bundle args. + if err := k.validateSubmitBundleArgs(ctx, &bundleProposal, msg); err != nil { + return nil, err + } + + // Reset points of uploader as node has proven to be active. + k.resetPoints(ctx, msg.PoolId, msg.Staker) + + // If the previous bundle was dropped, just register the new bundle. + // No previous round needs to be evaluated. + if bundleProposal.StorageId == "" { + nextUploader := k.chooseNextUploaderFromAllStakers(ctx, msg.PoolId) + + k.registerBundleProposalFromUploader(ctx, msg, nextUploader) + + return &types.MsgSubmitBundleProposalResponse{}, nil + } + + // Previous round contains a bundle which needs to be validated now. + + // Increase points of stakers who did not vote at all + slash + remove if necessary. + // The protocol requires everybody to always stay active. + k.handleNonVoters(ctx, msg.PoolId) + + // evaluate all votes and determine the status based on the votes weighted with stake + delegation + voteDistribution := k.GetVoteDistribution(ctx, msg.PoolId) + + // Handle tally outcome + switch voteDistribution.Status { + + case types.BUNDLE_STATUS_VALID: + // If a bundle is valid the following things happen: + // 1. A reward is paid out to the uploader, its delegators and the treasury. + // The appropriate funds are deducted from the total pool funds. + // 2. The next uploader is randomly selected based on everybody who + // voted valid on this bundle. + // 3. The bundle is finalized by adding it permanently to the state. + // 4. The sender immediately starts the next round by registering + // their new bundle proposal. + + // Calculate the total reward for the bundle, and individual payouts. 
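+ // From the fields used below, the bundle reward presumably splits into a treasury share (the network fee), an uploader commission, and a delegation share for the uploader's delegators, i.e. roughly Total = Treasury + Uploader + Delegation; the exact weights live in calculatePayouts. + // If the uploader has no delegators, the delegation share falls back to the uploader.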
+ bundleReward := k.calculatePayouts(ctx, msg.PoolId) + + if err := k.poolKeeper.ChargeFundersOfPool(ctx, msg.PoolId, bundleReward.Total); err != nil { + // update the latest time on the bundle to indicate that the bundle is still active; + // protocol nodes use this to determine the upload timeout + bundleProposal.UpdatedAt = uint64(ctx.BlockTime().Unix()) + k.SetBundleProposal(ctx, bundleProposal) + + // emit an event which indicates that the pool has run out of funds + _ = ctx.EventManager().EmitTypedEvent(&pooltypes.EventPoolOutOfFunds{ + PoolId: msg.PoolId, + }) + + return &types.MsgSubmitBundleProposalResponse{}, nil + } + + pool, _ := k.poolKeeper.GetPool(ctx, msg.PoolId) + bundleProposal, _ := k.GetBundleProposal(ctx, msg.PoolId) + + uploaderPayout := bundleReward.Uploader + + delegationPayoutSuccessful := k.delegationKeeper.PayoutRewards(ctx, bundleProposal.Uploader, bundleReward.Delegation, pooltypes.ModuleName) + // If the staker has no delegators, add all delegation rewards to the staker's rewards + if !delegationPayoutSuccessful { + uploaderPayout += bundleReward.Delegation + } + + // send commission to uploader + if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, pooltypes.ModuleName, bundleProposal.Uploader, uploaderPayout); err != nil { + return nil, err + } + + // send network fee to treasury + if err := util.TransferFromModuleToTreasury(k.accountKeeper, k.distrkeeper, ctx, pooltypes.ModuleName, bundleReward.Treasury); err != nil { + return nil, err + } + + // slash stakers who voted incorrectly + for _, voter := range bundleProposal.VotersInvalid { + k.slashDelegatorsAndRemoveStaker(ctx, msg.PoolId, voter, delegationTypes.SLASH_TYPE_VOTE) + } + + // Determine next uploader and register next bundle + + // Get next uploader from stakers who voted `valid` and are still active + activeVoters := make([]string, 0) + nextUploader := "" + for _, voter := range bundleProposal.VotersValid { + if k.stakerKeeper.DoesValaccountExist(ctx, msg.PoolId, voter) { + activeVoters = append(activeVoters, voter) + } + } + + if len(activeVoters) > 0 { + nextUploader = k.chooseNextUploaderFromSelectedStakers(ctx, msg.PoolId, activeVoters) + } else { + nextUploader = k.chooseNextUploaderFromAllStakers(ctx, msg.PoolId) + } + + k.finalizeCurrentBundleProposal(ctx, pool.Id, voteDistribution, bundleReward, nextUploader) + + // Register the provided bundle as a new proposal for the next round + k.registerBundleProposalFromUploader(ctx, msg, nextUploader) + + return &types.MsgSubmitBundleProposalResponse{}, nil + + case types.BUNDLE_STATUS_INVALID: + // If the bundle is invalid, everybody who voted incorrectly gets slashed. + // The bundle provided by the message-sender is of no use, because the previous bundle + // turned out to be incorrect. + // Therefore this round needs to start again and the message-sender stays the next uploader. + + // slash stakers who voted incorrectly - the uploader receives an upload slash + for _, voter := range bundleProposal.VotersValid { + if voter == bundleProposal.Uploader { + k.slashDelegatorsAndRemoveStaker(ctx, msg.PoolId, voter, delegationTypes.SLASH_TYPE_UPLOAD) + } else { + k.slashDelegatorsAndRemoveStaker(ctx, msg.PoolId, voter, delegationTypes.SLASH_TYPE_VOTE) + } + } + + // Drop the current bundle. Can't register the provided bundle because the previous bundle + // needs to be resubmitted first. 
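+ // Judging from the arguments passed below, dropping discards the current proposal while carrying over bundleProposal.NextUploader, so the sender presumably keeps the next-uploader role for the resubmission.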
+ k.dropCurrentBundleProposal(ctx, msg.PoolId, voteDistribution, bundleProposal.NextUploader) + + return &types.MsgSubmitBundleProposalResponse{}, nil + + default: + // If the bundle is neither valid nor invalid the quorum has not been reached yet. + return nil, types.ErrQuorumNotReached + } +} diff --git a/x/bundles/keeper/msg_server_submit_bundle_proposal_test.go b/x/bundles/keeper/msg_server_submit_bundle_proposal_test.go new file mode 100644 index 00000000..afc891ba --- /dev/null +++ b/x/bundles/keeper/msg_server_submit_bundle_proposal_test.go @@ -0,0 +1,390 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_submit_bundle_proposal.go + +* Submit the first bundle proposal with empty storage id +* Submit the first bundle proposal with bigger bundle size than allowed +* Submit the first bundle proposal with empty bundle +* Submit the first bundle proposal with empty from key +* Submit the first bundle proposal with empty to key +* Submit the first bundle proposal with invalid index +* Submit the first bundle proposal with empty data size +* Submit the first bundle proposal with empty data hash +* Submit the first bundle proposal with empty bundle summary +* Submit a bundle proposal with valid args + +*/ + +var _ = Describe("msg_server_submit_bundle_proposal.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Submit the first bundle proposal with empty storage id", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with bigger bundle size than allowed", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 
100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 101, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty bundle", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 0, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty from key", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty to key", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with invalid index", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 2, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty data size", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 0, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + 
Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty data hash", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) + + It("Submit the first bundle proposal with empty bundle summary", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "", + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) + + It("Submit the first bundle 
proposal with valid args", func() { + // ARRANGE + s.CommitAfterSeconds(60) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + // ASSERT + bundleProposal, found := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(found).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + Expect(bundleProposal.Uploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.NextUploader).To(Equal(i.STAKER_0)) + Expect(bundleProposal.DataSize).To(Equal(uint64(100))) + Expect(bundleProposal.DataHash).To(Equal("test_hash")) + Expect(bundleProposal.BundleSize).To(Equal(uint64(100))) + Expect(bundleProposal.FromKey).To(Equal("0")) + Expect(bundleProposal.ToKey).To(Equal("99")) + Expect(bundleProposal.BundleSummary).To(Equal("test_value")) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_0)) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) +}) diff --git a/x/bundles/keeper/msg_server_update_params.go b/x/bundles/keeper/msg_server_update_params.go new file mode 100644 index 00000000..72a4cd76 --- /dev/null +++ b/x/bundles/keeper/msg_server_update_params.go @@ -0,0 +1,30 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + + // Bundles + "github.com/KYVENetwork/chain/x/bundles/types" + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +func (k msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + params := k.GetParams(ctx) + + payload := params + _ = json.Unmarshal([]byte(req.Payload), &payload) + k.SetParams(ctx, payload) + + return &types.MsgUpdateParamsResponse{}, nil +} diff --git a/x/bundles/keeper/msg_server_update_params_test.go b/x/bundles/keeper/msg_server_update_params_test.go new file mode 100644 index 00000000..82c83996 --- /dev/null +++ b/x/bundles/keeper/msg_server_update_params_test.go @@ -0,0 +1,493 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Bundles + "github.com/KYVENetwork/chain/x/bundles/types" + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" +) + +/* + +TEST CASES - msg_server_update_params.go + +* Check default params +* Invalid authority (transaction) +* Invalid authority (proposal) +* Update every param at once +* Update no param +* Update with invalid formatted payload + +* Update upload timeout +* Update upload timeout with invalid value + +* Update storage cost +* Update storage cost with invalid value + +* Update network fee +* Update network fee with invalid value + +* Update max points +* Update max points with invalid value + +*/ + +var _ = Describe("msg_server_update_params.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + + minDeposit := s.App().GovKeeper.GetDepositParams(s.Ctx()).MinDeposit + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter := sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + + BeforeEach(func() { + s = i.NewCleanChain() + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter = sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Check default params", func() { + // ASSERT + params := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(params.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(params.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(params.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(params.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, err := s.RunTx(proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Update every param at once", func() { + // ARRANGE + payload := `{ + "upload_timeout": 20, + "storage_cost": 100, + "network_fee": "0.05", + "max_points": 15 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(uint64(20))) + Expect(updatedParams.StorageCost).To(Equal(uint64(100))) + Expect(updatedParams.NetworkFee).To(Equal("0.05")) + Expect(updatedParams.MaxPoints).To(Equal(uint64(15))) + }) + + It("Update no params", func() { + // ARRANGE + payload := `{}` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( 
+ voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update with invalid formatted payload", func() { + // ARRANGE + payload := `{ + "upload_timeout": 20, + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update upload timeout", func() { + // ARRANGE + payload := `{ + "upload_timeout": 20 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(uint64(20))) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update upload timeout with invalid value", func() { + // ARRANGE + payload := `{ + "upload_timeout": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update storage cost", func() { + // ARRANGE + payload := `{ + "storage_cost": 100 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr 
:= s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(uint64(100))) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update storage cost with invalid value", func() { + // ARRANGE + payload := `{ + "storage_cost": -100 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update network fee", func() { + // ARRANGE + payload := `{ + "network_fee": "0.05" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal("0.05")) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update network fee with invalid value", func() { + // ARRANGE + payload := `{ + "network_fee": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) + + It("Update max points", func() { + // ARRANGE + payload := `{ + "max_points": 15 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT 
+ updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(uint64(15))) + }) + + It("Update max points with invalid value", func() { + // ARRANGE + payload := `{ + "max_points": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().BundlesKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UploadTimeout).To(Equal(types.DefaultUploadTimeout)) + Expect(updatedParams.StorageCost).To(Equal(types.DefaultStorageCost)) + Expect(updatedParams.NetworkFee).To(Equal(types.DefaultNetworkFee)) + Expect(updatedParams.MaxPoints).To(Equal(types.DefaultMaxPoints)) + }) +}) diff --git a/x/bundles/keeper/msg_server_vote_bundle_proposal.go b/x/bundles/keeper/msg_server_vote_bundle_proposal.go new file mode 100644 index 00000000..9e08af06 --- /dev/null +++ b/x/bundles/keeper/msg_server_vote_bundle_proposal.go @@ -0,0 +1,59 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/bundles/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// VoteBundleProposal handles the logic of an SDK message that allows protocol nodes to vote on a pool's bundle proposal. +func (k msgServer) VoteBundleProposal( + goCtx context.Context, msg *types.MsgVoteBundleProposal, +) (*types.MsgVoteBundleProposalResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if err := k.AssertCanVote(ctx, msg.PoolId, msg.Staker, msg.Creator, msg.StorageId); err != nil { + return nil, err + } + + bundleProposal, _ := k.GetBundleProposal(ctx, msg.PoolId) + hasVotedAbstain := util.ContainsString(bundleProposal.VotersAbstain, msg.Staker) + + if hasVotedAbstain { + if msg.Vote == types.VOTE_TYPE_ABSTAIN { + return nil, types.ErrAlreadyVotedAbstain + } + + // remove voter from abstain votes + bundleProposal.VotersAbstain, _ = util.RemoveFromStringArrayStable(bundleProposal.VotersAbstain, msg.Staker) + } + + switch msg.Vote { + case types.VOTE_TYPE_VALID: + bundleProposal.VotersValid = append(bundleProposal.VotersValid, msg.Staker) + case types.VOTE_TYPE_INVALID: + bundleProposal.VotersInvalid = append(bundleProposal.VotersInvalid, msg.Staker) + case types.VOTE_TYPE_ABSTAIN: + bundleProposal.VotersAbstain = append(bundleProposal.VotersAbstain, msg.Staker) + default: + return nil, sdkErrors.Wrapf(sdkErrors.ErrUnauthorized, types.ErrInvalidVote.Error(), msg.Vote) + } + + k.SetBundleProposal(ctx, bundleProposal) + + // reset points as user has now proven to be active + k.resetPoints(ctx, msg.PoolId, msg.Staker) + + // Emit a vote event. 
+ _ = ctx.EventManager().EmitTypedEvent(&types.EventBundleVote{ + PoolId: msg.PoolId, + Staker: msg.Staker, + StorageId: msg.StorageId, + Vote: msg.Vote, + }) + + return &types.MsgVoteBundleProposalResponse{}, nil +} diff --git a/x/bundles/keeper/msg_server_vote_bundle_proposal_test.go b/x/bundles/keeper/msg_server_vote_bundle_proposal_test.go new file mode 100644 index 00000000..082983ce --- /dev/null +++ b/x/bundles/keeper/msg_server_vote_bundle_proposal_test.go @@ -0,0 +1,294 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_vote_bundle_proposal.go + +* Try to vote valid as the only voter on bundle proposal +* Try to vote invalid as the only voter on bundle proposal +* Try to vote abstain as the only voter on bundle proposal +* Try to vote abstain on proposal again +* Try to vote valid on proposal after abstain vote +* Try to vote invalid on proposal after abstain vote +* Try to vote unspecified on proposal +* Try to vote as not the first voter on bundle proposal + +*/ + +var _ = Describe("msg_server_vote_bundle_proposal.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Try to vote valid as the only voter on bundle proposal", func() { + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Try to vote invalid as the only voter on bundle 
proposal", func() { + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Try to vote abstain as the only voter on bundle proposal", func() { + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).To(ContainElement(i.STAKER_1)) + }) + + It("Try to vote abstain on proposal again", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + // ACT + s.RunTxBundlesError(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).To(ContainElement(i.STAKER_1)) + }) + + It("Try to vote valid on proposal after abstain vote", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Try to vote invalid on proposal after abstain vote", func() { + // ARRANGE + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + 
Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Try to vote unspecified on proposal", func() { + // ACT + s.RunTxBundlesError(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_UNSPECIFIED, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Try to vote as not the first voter on bundle proposal", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_2, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_2, + PoolId: 0, + Valaddress: i.VALADDRESS_2, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // ACT + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_2, + Staker: i.STAKER_2, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + // ASSERT + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersValid).To(ContainElement(i.STAKER_2)) + + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersInvalid).NotTo(ContainElement(i.STAKER_2)) + + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_1)) + Expect(bundleProposal.VotersAbstain).NotTo(ContainElement(i.STAKER_2)) + }) +}) diff --git a/x/bundles/module.go b/x/bundles/module.go new file mode 100644 index 00000000..13b211ca --- /dev/null +++ b/x/bundles/module.go @@ -0,0 +1,162 @@ +package bundles + +import ( + "context" + "encoding/json" + "fmt" + // this line is used by starport scaffolding # 1 + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/KYVENetwork/chain/x/bundles/client/cli" + "github.com/KYVENetwork/chain/x/bundles/keeper" + "github.com/KYVENetwork/chain/x/bundles/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. 
+type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState needs to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis is used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module.
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. 
To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.InitMemStore(ctx) +} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + am.keeper.HandleUploadTimeout(sdk.WrapSDKContext(ctx)) + return []abci.ValidatorUpdate{} +} diff --git a/x/bundles/spec/01_concepts.md b/x/bundles/spec/01_concepts.md new file mode 100644 index 00000000..de0e8d7b --- /dev/null +++ b/x/bundles/spec/01_concepts.md @@ -0,0 +1,56 @@ + + +# Concepts + +The bundles module implements the main logic for archiving data on other +storage providers. It handles the submission of bundle proposals and the +finalization of valid bundles, and keeps track of validator votes. It +uses the stakers and delegation modules to determine who can submit/vote +and which participants get slashed for malicious behaviour. + +## Code Structure + +This module adheres to our global coding structure, defined [here](../../../CodeStructure.md). + +## Validating in Rounds + +Data which gets validated and archived by KYVE is validated in rounds. +In every round, one uploader is selected deterministically and is then +responsible for submitting a bundle proposal. Every other participant +then has to validate this bundle proposal. If the network agrees on the proposal, +the bundle gets finalized and the network moves on to the next bundle proposal. + +## Bundle Proposals + +In order to get data validated and archived by KYVE, a participant of a pool +has to package data in a bundle proposal and submit it to the KYVE chain. +The role of the participant is then `Uploader`. Once the bundle proposal with +the required metadata (data range, data size and hash) is submitted, other +participants can vote on this proposal. + +## Voting + +All other participants who have not uploaded data to the network because they +were not the designated uploader have to validate the submitted data. The role +of the participant is then `Validator`. They take the storage id the uploader +submitted, retrieve the data from the storage provider and then validate it locally +with respect to the used runtime implementation. Furthermore, the metadata is +validated by comparing the data size and hash. The validator then votes on the +bundle proposal accordingly. + +## Bundle Evaluation + +After a certain timeout (`upload_interval`) the next uploader can submit the next +bundle proposal. While the next bundle proposal is being submitted, the current one +gets evaluated. If more than 50% voted valid, the bundle gets finalized and is +saved forever on-chain so that everyone can use the validated data. + +## Punishing malicious behaviour + +If more than 50% voted invalid, the uploader receives a slash and gets removed +from the storage pool. Furthermore, validators who voted incorrectly also get +slashed and removed. If an uploader or validator doesn't upload/vote within a specific +time range, they receive points. Once they reach a certain number of points, they +receive a timeout slash and also get removed.
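+
+## Example: Quorum Evaluation
+
+The following is a minimal, self-contained Go sketch of the 50% rule described
+in the Bundle Evaluation and Punishing malicious behaviour sections above. It is
+not the actual keeper logic: the status names are simplified, slashing, rewards
+and points are omitted, and measuring the threshold against the votes cast
+(instead of the pool's total voting power) is an assumption made here for brevity.
+
+```go
+package main
+
+import "fmt"
+
+// status values for this sketch only; the real module uses the
+// BundleStatus enum defined in the bundles proto files.
+type status string
+
+const (
+	statusValid    status = "valid"
+	statusInvalid  status = "invalid"
+	statusNoQuorum status = "no_quorum"
+)
+
+// evaluate applies the quorum rule: more than 50% valid finalizes the
+// bundle, more than 50% invalid drops it (and slashes the uploader),
+// anything else means no quorum was reached.
+func evaluate(valid, invalid, abstain uint64) status {
+	total := valid + invalid + abstain // assumption: threshold over votes cast
+	if total == 0 {
+		return statusNoQuorum
+	}
+	if valid*2 > total {
+		return statusValid
+	}
+	if invalid*2 > total {
+		return statusInvalid
+	}
+	return statusNoQuorum
+}
+
+func main() {
+	fmt.Println(evaluate(60, 30, 10)) // valid
+	fmt.Println(evaluate(20, 60, 20)) // invalid
+	fmt.Println(evaluate(40, 40, 20)) // no_quorum
+}
+```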
diff --git a/x/bundles/spec/02_state.md b/x/bundles/spec/02_state.md new file mode 100644 index 00000000..2bebce16 --- /dev/null +++ b/x/bundles/spec/02_state.md @@ -0,0 +1,67 @@ + + +# State + +The module is mainly responsible for handling the current bundle +proposal state and for holding the state of all finalized bundles. + +## Bundle Proposals +Bundle proposals have their own prefix in the KV-Store and are defined in +one proto file. + +### BundleProposal +BundleProposal has all the information of the current bundle proposal and +also keeps track of votes. One bundle proposal is always linked to one +storage pool with a 1-1 relationship. + +- BundleProposal `0x01 | PoolId -> ProtocolBuffer(bundleProposal)` + +```go +type BundleProposal struct { + PoolId uint64 + StorageId string + Uploader string + NextUploader string + DataSize uint64 + BundleSize uint64 + ToKey string + BundleSummary string + DataHash string + UpdatedAt uint64 + VotersValid []string + VotersInvalid []string + VotersAbstain []string + FromKey string + StorageProviderId uint32 + CompressionId uint32 +} +``` + +## Finalized Bundles +Finalized bundles have their own prefix in the KV-Store. + +### FinalizedBundle +FinalizedBundle has all the important information of a bundle which is saved +forever on the KYVE chain. + +- FinalizedBundle `0x02 | PoolId | Id -> ProtocolBuffer(finalizedBundle)` + +```go +type FinalizedBundle struct { + PoolId uint64 + Id uint64 + StorageId string + Uploader string + FromIndex uint64 + ToIndex uint64 + ToKey string + BundleSummary string + DataHash string + FinalizedAt uint64 + FromKey string + StorageProviderId uint32 + CompressionId uint32 +} +``` diff --git a/x/bundles/spec/03_messages.md b/x/bundles/spec/03_messages.md new file mode 100644 index 00000000..2a610d85 --- /dev/null +++ b/x/bundles/spec/03_messages.md @@ -0,0 +1,41 @@ + + +# Messages + +## MsgSubmitBundleProposal + +With this transaction the uploader of the current proposal round +submits his bundle proposal to the network for others to validate. +The uploader has to be a staker in the storage pool and should be +the designated uploader of this round. The most important property +which gets submitted is the storage id of the proposal. With this, +other participants can retrieve the data and validate it for +themselves. Once the proposal is validated, the uploader receives +the bundle reward for his effort. + +## MsgVoteBundleProposal + +Once other participants see that a new bundle proposal is available, +they validate it. Depending on the result, they vote either valid, +invalid or abstain. Abstain is a special vote which implies that +the validator could not make a decision. A validator who votes +abstain cannot be slashed for that vote in the current round, +but won't be chosen as uploader for the next round either. + +## MsgClaimUploaderRole + +If the storage pool is in genesis state (the pool just got created) or +the last bundle has been dropped for not reaching the required quorum, the +fastest participant can claim the currently free uploader role. This can +only be called if the next uploader is not defined and the role for the +current round is free. + +## MsgSkipUploaderRole + +This transaction gets called when the uploader can't produce a bundle proposal +for whatever reason. Examples could be the storage provider being offline or +the data source not returning any data. With this, the uploader skips his role +and lets another participant try to submit a valid bundle proposal.
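+
+## Example: Constructing Messages
+
+As a rough illustration of how a protocol node would use these messages, the
+sketch below constructs a MsgSubmitBundleProposal for the designated uploader
+and a MsgVoteBundleProposal for a validator. It only builds the message values;
+signing and broadcasting the transaction are out of scope here, and all
+addresses, the storage id and the metadata are placeholders.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	bundletypes "github.com/KYVENetwork/chain/x/bundles/types"
+)
+
+func main() {
+	// The designated uploader submits the proposal. In practice the node
+	// fills these fields from the bundle it created and uploaded to the
+	// configured storage provider.
+	submit := &bundletypes.MsgSubmitBundleProposal{
+		Creator:       "kyve1...", // valaddress of the uploader's protocol node (placeholder)
+		Staker:        "kyve1...", // staker the node is running for (placeholder)
+		PoolId:        0,
+		StorageId:     "storage-id-returned-by-the-storage-provider",
+		DataSize:      100,
+		DataHash:      "sha256-of-the-raw-compressed-data",
+		FromIndex:     0,
+		BundleSize:    100,
+		FromKey:       "0",
+		ToKey:         "99",
+		BundleSummary: "short summary stored on-chain",
+	}
+
+	// Every other participant retrieves the data by its storage id,
+	// validates it locally and votes on the proposal.
+	vote := &bundletypes.MsgVoteBundleProposal{
+		Creator:   "kyve1...", // valaddress of the validator's protocol node (placeholder)
+		Staker:    "kyve1...",
+		PoolId:    0,
+		StorageId: submit.StorageId,
+		Vote:      bundletypes.VOTE_TYPE_VALID,
+	}
+
+	fmt.Printf("%+v\n%+v\n", submit, vote)
+}
+```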
+ diff --git a/x/bundles/spec/04_end_block.md b/x/bundles/spec/04_end_block.md new file mode 100644 index 00000000..aedb2a34 --- /dev/null +++ b/x/bundles/spec/04_end_block.md @@ -0,0 +1,15 @@ + + +# EndBlock + +EndBlock is used to determine whether the uploader failed to +submit his bundle proposal within a predefined timeout. The penalty +for not uploading in time is a point. If a participant reaches +a certain number of points, the participant receives a timeout slash +and gets removed from the storage pool. + +To prevent this, the uploader should always submit a bundle proposal. +If he cannot do so for whatever reason, he should skip +his uploader role, indicating that he is not offline. \ No newline at end of file diff --git a/x/bundles/spec/05_params.md b/x/bundles/spec/05_params.md new file mode 100644 index 00000000..60ebc75d --- /dev/null +++ b/x/bundles/spec/05_params.md @@ -0,0 +1,14 @@ + + +# Parameters + +The bundles module contains the following parameters: + +| Key | Type | Example | +|---------------|-------------------------|---------| +| UploadTimeout | uint64 (seconds) | 600 | +| StorageCost | uint64 (tkyve per byte) | 25 | +| NetworkFee | string (%) | "0.01" | +| MaxPoints | uint64 | 5 | diff --git a/x/bundles/spec/06_events.md b/x/bundles/spec/06_events.md new file mode 100644 index 00000000..bcc2c805 --- /dev/null +++ b/x/bundles/spec/06_events.md @@ -0,0 +1,217 @@ + + +# Events + +The bundles module contains the following events: + +## EventBundleProposed + +EventBundleProposed indicates that a new bundle proposal was submitted +to a storage pool. This event contains all information about the +proposal. + +```protobuf +syntax = "proto3"; + +message EventBundleProposed { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // internal id for the KYVE-bundle + uint64 id = 2; + // storage_id is the ID to retrieve the data item from the configured storage provider + // e.g. the ARWEAVE-id + string storage_id = 3; + // Address of the uploader/proposer of the bundle + string uploader = 4; + // data_size size in bytes of the data + uint64 data_size = 5; + // from_index starting index of the bundle (inclusive) + uint64 from_index = 6; + // bundle_size amount of data items in the bundle + uint64 bundle_size = 7; + // from_key the key of the first data item in the bundle + string from_key = 8; + // to_key the key of the last data item in the bundle + string to_key = 9; + // bundle_summary is a short string holding some useful information of + // the bundle which will get stored on-chain + string bundle_summary = 10; + // data_hash is a sha256 hash of the raw compressed data + string data_hash = 11; + // proposed_at the unix time when the bundle was proposed + uint64 proposed_at = 12; + // storage_provider_id the unique id of the storage provider where + // the data of the bundle is stored + uint32 storage_provider_id = 13; + // compression_id the unique id of the compression type the data + // of the bundle was compressed with + uint32 compression_id = 14; +} +``` + +It gets thrown from the following actions: + +- MsgSubmitBundleProposal + +## EventBundleFinalized + +EventBundleFinalized indicates that a bundle has been finalized with +a certain status. This status can be dropped, invalid or valid. + +```protobuf +syntax = "proto3"; + +message EventBundleFinalized { + // pool_id is the unique ID of the pool.
+ uint64 pool_id = 1; + // internal id for the KYVE-bundle + uint64 id = 2; + // total voting power which voted for valid + uint64 valid = 3; + // total voting power which voted for invalid + uint64 invalid = 4; + // total voting power which voted for abstain + uint64 abstain = 5; + // total voting power of the pool + uint64 total = 6; + // status of the finalized bundle + BundleStatus status = 7; + // reward_treasury rewards transferred to treasury (in ukyve) + uint64 reward_treasury = 8; + // reward_uploader rewards directly transferred to uploader (in ukyve) + uint64 reward_uploader = 9; + // reward_delegation rewards distributed among all delegators (in ukyve) + uint64 reward_delegation = 10; + // reward_total the total bundle reward + uint64 reward_total = 11; + // finalized_at the block height where the bundle got finalized + uint64 finalized_at = 12; + // uploader the address of the uploader of this bundle + string uploader = 13; + // next_uploader the address of the next uploader after this bundle + string next_uploader = 14; +} +``` + +It gets thrown from the following actions: + +- MsgSubmitBundleProposal +- EndBlock + +## EventBundleVote + +EventBundleVote indicates that a participant has voted on a bundle. + +```protobuf +syntax = "proto3"; + +message EventBundleVote { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the staker address of the protocol node. + string staker = 2; + // storage_id is the unique ID of the bundle. + string storage_id = 3; + // vote is the vote type the validator voted with + VoteType vote = 4; +} +``` + +It gets thrown from the following actions: + +- MsgVoteBundleProposal + +## EventClaimedUploaderRole + +EventClaimedUploaderRole indicates that a participant has claimed +a free uploader role spot in a storage pool. + +```protobuf +syntax = "proto3"; + +message EventClaimedUploaderRole { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // id internal id for the KYVE-bundle + uint64 id = 2; + // new_uploader the address of the participant who claimed + // the free uploader role + string new_uploader = 3; +} +``` + +It gets thrown from the following actions: + +- MsgClaimUploaderRole + +## EventSkippedUploaderRole + +EventSkippedUploaderRole indicates that the current uploader +of a storage pool has skipped his uploader role. + +```protobuf +syntax = "proto3"; + +message EventSkippedUploaderRole { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // id internal id for the KYVE-bundle + uint64 id = 2; + // new_uploader the address of the participant who takes over + // the uploader role after the skip + string new_uploader = 3; +} +``` + +It gets thrown from the following actions: + +- MsgSkipUploaderRole + +## EventPointIncreased + +EventPointIncreased indicates that a staker received a point +for being offline while voting or submitting. + +```protobuf +syntax = "proto3"; + +message EventPointIncreased { + // pool_id is the unique ID of the pool. + uint64 pool_id = 1; + // staker is the address of the staker who received the point + string staker = 2; + // current_points is the amount of points the staker has now + uint64 current_points = 3; +} +``` + +It gets thrown from the following actions: + +- MsgSubmitBundleProposal +- EndBlock + +## EventPointsReset + +EventPointsReset indicates that a staker who previously had +some points got his points reset due to being active again. + +```protobuf +syntax = "proto3"; + +message EventPointsReset { + // pool_id is the unique ID of the pool.
+ uint64 pool_id = 1; + // staker is the address of the staker who has zero points now + string staker = 2; +} +``` + +It gets thrown from the following actions: + +- MsgSubmitBundleProposal +- MsgVoteBundleProposal +- MsgSkipUploaderRole + diff --git a/x/bundles/spec/07_exported.md b/x/bundles/spec/07_exported.md new file mode 100644 index 00000000..02ab7a3a --- /dev/null +++ b/x/bundles/spec/07_exported.md @@ -0,0 +1,8 @@ + + +# Exported + +The `x/bundles` module exports no functions, since it sits on top of all other modules and therefore needs no +exports. diff --git a/x/bundles/types/bundles.pb.go b/x/bundles/types/bundles.pb.go new file mode 100644 index 00000000..891cfa74 --- /dev/null +++ b/x/bundles/types/bundles.pb.go @@ -0,0 +1,1781 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/bundles/v1beta1/bundles.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BundleStatus represents the status of an evaluated bundle +// proposal. +type BundleStatus int32 + +const ( + // BUNDLE_STATUS_UNSPECIFIED ... + BUNDLE_STATUS_UNSPECIFIED BundleStatus = 0 + // BUNDLE_STATUS_VALID ... + BUNDLE_STATUS_VALID BundleStatus = 1 + // BUNDLE_STATUS_INVALID ... + BUNDLE_STATUS_INVALID BundleStatus = 2 + // BUNDLE_STATUS_NO_FUNDS ... + BUNDLE_STATUS_NO_FUNDS BundleStatus = 3 + // BUNDLE_STATUS_NO_QUORUM ... + BUNDLE_STATUS_NO_QUORUM BundleStatus = 4 + // BUNDLE_STATUS_DISABLED ... 
+ BUNDLE_STATUS_DISABLED BundleStatus = 5 +) + +var BundleStatus_name = map[int32]string{ + 0: "BUNDLE_STATUS_UNSPECIFIED", + 1: "BUNDLE_STATUS_VALID", + 2: "BUNDLE_STATUS_INVALID", + 3: "BUNDLE_STATUS_NO_FUNDS", + 4: "BUNDLE_STATUS_NO_QUORUM", + 5: "BUNDLE_STATUS_DISABLED", +} + +var BundleStatus_value = map[string]int32{ + "BUNDLE_STATUS_UNSPECIFIED": 0, + "BUNDLE_STATUS_VALID": 1, + "BUNDLE_STATUS_INVALID": 2, + "BUNDLE_STATUS_NO_FUNDS": 3, + "BUNDLE_STATUS_NO_QUORUM": 4, + "BUNDLE_STATUS_DISABLED": 5, +} + +func (x BundleStatus) String() string { + return proto.EnumName(BundleStatus_name, int32(x)) +} + +func (BundleStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_889cf76d77a4de2b, []int{0} +} + +// BundleProposal represents the current bundle proposal +// of a storage pool +type BundleProposal struct { + // pool_id is the id of the pool for which this proposal is for + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // storage_id is the id with which the data can be retrieved from + StorageId string `protobuf:"bytes,2,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // uploader is the address of the staker who submitted the current proposal + Uploader string `protobuf:"bytes,3,opt,name=uploader,proto3" json:"uploader,omitempty"` + // next_uploader is the address of the staker who should upload the next proposal + NextUploader string `protobuf:"bytes,4,opt,name=next_uploader,json=nextUploader,proto3" json:"next_uploader,omitempty"` + // data_size the size of the data in bytes + DataSize uint64 `protobuf:"varint,5,opt,name=data_size,json=dataSize,proto3" json:"data_size,omitempty"` + // bundle_size the size of the bundle (amount of data items) + BundleSize uint64 `protobuf:"varint,6,opt,name=bundle_size,json=bundleSize,proto3" json:"bundle_size,omitempty"` + // to_key the key of the last data item in the bundle proposal + ToKey string `protobuf:"bytes,7,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` + // bundle_summary a string summary of the current proposal + BundleSummary string `protobuf:"bytes,8,opt,name=bundle_summary,json=bundleSummary,proto3" json:"bundle_summary,omitempty"` + // data_hash a sha256 hash of the raw compressed data + DataHash string `protobuf:"bytes,9,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // updated_at the last time this proposal was edited + UpdatedAt uint64 `protobuf:"varint,10,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + // voters_valid list of all stakers who voted in favor for current proposal + VotersValid []string `protobuf:"bytes,11,rep,name=voters_valid,json=votersValid,proto3" json:"voters_valid,omitempty"` + // voters_invalid list of all stakers who voted against for current proposal + VotersInvalid []string `protobuf:"bytes,12,rep,name=voters_invalid,json=votersInvalid,proto3" json:"voters_invalid,omitempty"` + // voters_abstain list of all stakers who voted abstain for current proposal + VotersAbstain []string `protobuf:"bytes,13,rep,name=voters_abstain,json=votersAbstain,proto3" json:"voters_abstain,omitempty"` + // from_key the key of the first data item in the bundle proposal + FromKey string `protobuf:"bytes,14,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` + // storage_provider_id the id of the storage provider where the bundle is stored + StorageProviderId uint32 `protobuf:"varint,15,opt,name=storage_provider_id,json=storageProviderId,proto3" 
json:"storage_provider_id,omitempty"` + // compression_id the id of the compression type with which the data was compressed + CompressionId uint32 `protobuf:"varint,16,opt,name=compression_id,json=compressionId,proto3" json:"compression_id,omitempty"` +} + +func (m *BundleProposal) Reset() { *m = BundleProposal{} } +func (m *BundleProposal) String() string { return proto.CompactTextString(m) } +func (*BundleProposal) ProtoMessage() {} +func (*BundleProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_889cf76d77a4de2b, []int{0} +} +func (m *BundleProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BundleProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BundleProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BundleProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_BundleProposal.Merge(m, src) +} +func (m *BundleProposal) XXX_Size() int { + return m.Size() +} +func (m *BundleProposal) XXX_DiscardUnknown() { + xxx_messageInfo_BundleProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_BundleProposal proto.InternalMessageInfo + +func (m *BundleProposal) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *BundleProposal) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +func (m *BundleProposal) GetUploader() string { + if m != nil { + return m.Uploader + } + return "" +} + +func (m *BundleProposal) GetNextUploader() string { + if m != nil { + return m.NextUploader + } + return "" +} + +func (m *BundleProposal) GetDataSize() uint64 { + if m != nil { + return m.DataSize + } + return 0 +} + +func (m *BundleProposal) GetBundleSize() uint64 { + if m != nil { + return m.BundleSize + } + return 0 +} + +func (m *BundleProposal) GetToKey() string { + if m != nil { + return m.ToKey + } + return "" +} + +func (m *BundleProposal) GetBundleSummary() string { + if m != nil { + return m.BundleSummary + } + return "" +} + +func (m *BundleProposal) GetDataHash() string { + if m != nil { + return m.DataHash + } + return "" +} + +func (m *BundleProposal) GetUpdatedAt() uint64 { + if m != nil { + return m.UpdatedAt + } + return 0 +} + +func (m *BundleProposal) GetVotersValid() []string { + if m != nil { + return m.VotersValid + } + return nil +} + +func (m *BundleProposal) GetVotersInvalid() []string { + if m != nil { + return m.VotersInvalid + } + return nil +} + +func (m *BundleProposal) GetVotersAbstain() []string { + if m != nil { + return m.VotersAbstain + } + return nil +} + +func (m *BundleProposal) GetFromKey() string { + if m != nil { + return m.FromKey + } + return "" +} + +func (m *BundleProposal) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +func (m *BundleProposal) GetCompressionId() uint32 { + if m != nil { + return m.CompressionId + } + return 0 +} + +// FinalizedBundle represents a bundle proposal where the majority +// agreed on its validity +type FinalizedBundle struct { + // pool_id is the id of the pool for which this proposal is for + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // id is a unique identifier for each finalized bundle in a pool + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // storage_id is the id with which the data can be retrieved from + 
StorageId string `protobuf:"bytes,3,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // uploader is the address of the staker who submitted this bundle + Uploader string `protobuf:"bytes,4,opt,name=uploader,proto3" json:"uploader,omitempty"` + // from_index is the index from where the bundle starts (inclusive) + FromIndex uint64 `protobuf:"varint,5,opt,name=from_index,json=fromIndex,proto3" json:"from_index,omitempty"` + // to_index is the index to which the bundle goes (exclusive) + ToIndex uint64 `protobuf:"varint,6,opt,name=to_index,json=toIndex,proto3" json:"to_index,omitempty"` + // to_key the key of the last data item in the bundle proposal + ToKey string `protobuf:"bytes,7,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` + // bundle_summary a string summary of the current proposal + BundleSummary string `protobuf:"bytes,8,opt,name=bundle_summary,json=bundleSummary,proto3" json:"bundle_summary,omitempty"` + // data_hash a sha256 hash of the raw compressed data + DataHash string `protobuf:"bytes,9,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // finalized_at is the block height at which this bundle got finalized + FinalizedAt uint64 `protobuf:"varint,10,opt,name=finalized_at,json=finalizedAt,proto3" json:"finalized_at,omitempty"` + // from_key the key of the first data item in the bundle proposal + FromKey string `protobuf:"bytes,11,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` + // storage_provider_id the id of the storage provider where the bundle is stored + StorageProviderId uint32 `protobuf:"varint,12,opt,name=storage_provider_id,json=storageProviderId,proto3" json:"storage_provider_id,omitempty"` + // compression_id the id of the compression type with which the data was compressed + CompressionId uint32 `protobuf:"varint,13,opt,name=compression_id,json=compressionId,proto3" json:"compression_id,omitempty"` +} + +func (m *FinalizedBundle) Reset() { *m = FinalizedBundle{} } +func (m *FinalizedBundle) String() string { return proto.CompactTextString(m) } +func (*FinalizedBundle) ProtoMessage() {} +func (*FinalizedBundle) Descriptor() ([]byte, []int) { + return fileDescriptor_889cf76d77a4de2b, []int{1} +} +func (m *FinalizedBundle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FinalizedBundle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FinalizedBundle.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FinalizedBundle) XXX_Merge(src proto.Message) { + xxx_messageInfo_FinalizedBundle.Merge(m, src) +} +func (m *FinalizedBundle) XXX_Size() int { + return m.Size() +} +func (m *FinalizedBundle) XXX_DiscardUnknown() { + xxx_messageInfo_FinalizedBundle.DiscardUnknown(m) +} + +var xxx_messageInfo_FinalizedBundle proto.InternalMessageInfo + +func (m *FinalizedBundle) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *FinalizedBundle) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *FinalizedBundle) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +func (m *FinalizedBundle) GetUploader() string { + if m != nil { + return m.Uploader + } + return "" +} + +func (m *FinalizedBundle) GetFromIndex() uint64 { + if m != nil { + return m.FromIndex + } + return 0 +} + +func (m *FinalizedBundle) GetToIndex() uint64 { + if m != 
nil { + return m.ToIndex + } + return 0 +} + +func (m *FinalizedBundle) GetToKey() string { + if m != nil { + return m.ToKey + } + return "" +} + +func (m *FinalizedBundle) GetBundleSummary() string { + if m != nil { + return m.BundleSummary + } + return "" +} + +func (m *FinalizedBundle) GetDataHash() string { + if m != nil { + return m.DataHash + } + return "" +} + +func (m *FinalizedBundle) GetFinalizedAt() uint64 { + if m != nil { + return m.FinalizedAt + } + return 0 +} + +func (m *FinalizedBundle) GetFromKey() string { + if m != nil { + return m.FromKey + } + return "" +} + +func (m *FinalizedBundle) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +func (m *FinalizedBundle) GetCompressionId() uint32 { + if m != nil { + return m.CompressionId + } + return 0 +} + +func init() { + proto.RegisterEnum("kyve.bundles.v1beta1.BundleStatus", BundleStatus_name, BundleStatus_value) + proto.RegisterType((*BundleProposal)(nil), "kyve.bundles.v1beta1.BundleProposal") + proto.RegisterType((*FinalizedBundle)(nil), "kyve.bundles.v1beta1.FinalizedBundle") +} + +func init() { + proto.RegisterFile("kyve/bundles/v1beta1/bundles.proto", fileDescriptor_889cf76d77a4de2b) +} + +var fileDescriptor_889cf76d77a4de2b = []byte{ + // 665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x94, 0x41, 0x6e, 0xda, 0x40, + 0x14, 0x86, 0x31, 0x38, 0x80, 0x07, 0x4c, 0xe8, 0x24, 0x69, 0x1c, 0xa2, 0xb8, 0x24, 0x55, 0x25, + 0x54, 0x55, 0xa0, 0xa8, 0x27, 0x80, 0x02, 0xaa, 0x95, 0x94, 0xa4, 0x38, 0x8e, 0xd4, 0x6e, 0xac, + 0x21, 0x9e, 0xc0, 0x28, 0xe0, 0xb1, 0xec, 0x81, 0x86, 0x9c, 0xa0, 0xcb, 0xde, 0xa0, 0x8b, 0xf6, + 0x18, 0x3d, 0x40, 0x97, 0x59, 0x76, 0x59, 0x25, 0x17, 0xa9, 0x66, 0xc6, 0x50, 0x48, 0xda, 0x2a, + 0x9b, 0xee, 0x78, 0xdf, 0xff, 0x0f, 0x6f, 0x9e, 0xdf, 0xaf, 0x01, 0x7b, 0x17, 0xd3, 0x09, 0xae, + 0xf5, 0xc6, 0xbe, 0x37, 0xc4, 0x51, 0x6d, 0xb2, 0xdf, 0xc3, 0x0c, 0xed, 0xcf, 0xea, 0x6a, 0x10, + 0x52, 0x46, 0xe1, 0x3a, 0xf7, 0x54, 0x67, 0x2c, 0xf6, 0x94, 0xd6, 0xfb, 0xb4, 0x4f, 0x85, 0xa1, + 0xc6, 0x7f, 0x49, 0xef, 0xde, 0x57, 0x15, 0x14, 0x1a, 0xc2, 0x79, 0x1c, 0xd2, 0x80, 0x46, 0x68, + 0x08, 0x37, 0x41, 0x26, 0xa0, 0x74, 0xe8, 0x12, 0xcf, 0x50, 0xca, 0x4a, 0x45, 0xed, 0xa6, 0x79, + 0x69, 0x79, 0x70, 0x07, 0x80, 0x88, 0xd1, 0x10, 0xf5, 0x31, 0xd7, 0x92, 0x65, 0xa5, 0xa2, 0x75, + 0xb5, 0x98, 0x58, 0x1e, 0x2c, 0x81, 0xec, 0x38, 0x18, 0x52, 0xe4, 0xe1, 0xd0, 0x48, 0x09, 0x71, + 0x5e, 0xc3, 0xa7, 0x40, 0xf7, 0xf1, 0x25, 0x73, 0xe7, 0x06, 0x55, 0x18, 0xf2, 0x1c, 0x3a, 0x33, + 0xd3, 0x36, 0xd0, 0x3c, 0xc4, 0x90, 0x1b, 0x91, 0x2b, 0x6c, 0xac, 0x88, 0xd6, 0x59, 0x0e, 0x6c, + 0x72, 0x85, 0xe1, 0x13, 0x90, 0x93, 0x13, 0x49, 0x39, 0x2d, 0x64, 0x20, 0x91, 0x30, 0x6c, 0x80, + 0x34, 0xa3, 0xee, 0x05, 0x9e, 0x1a, 0x19, 0xf1, 0xdf, 0x2b, 0x8c, 0x1e, 0xe0, 0x29, 0x7c, 0x06, + 0x0a, 0xb3, 0x73, 0xe3, 0xd1, 0x08, 0x85, 0x53, 0x23, 0x2b, 0x64, 0x3d, 0x3e, 0x2a, 0xe1, 0xbc, + 0xf7, 0x00, 0x45, 0x03, 0x43, 0x93, 0xb7, 0xe7, 0xe0, 0x35, 0x8a, 0x06, 0x7c, 0xf0, 0x71, 0xe0, + 0x21, 0x86, 0x3d, 0x17, 0x31, 0x03, 0x88, 0xd6, 0x5a, 0x4c, 0xea, 0x0c, 0xee, 0x82, 0xfc, 0x84, + 0x32, 0x1c, 0x46, 0xee, 0x04, 0x0d, 0x89, 0x67, 0xe4, 0xca, 0xa9, 0x8a, 0xd6, 0xcd, 0x49, 0x76, + 0xca, 0x11, 0xbf, 0x45, 0x6c, 0x21, 0xbe, 0x34, 0xe5, 0x85, 0x49, 0x97, 0xd4, 0x92, 0x70, 0xc1, + 0x86, 0x7a, 0x11, 0x43, 0xc4, 0x37, 0xf4, 0x45, 0x5b, 0x5d, 0x42, 0xb8, 0x05, 0xb2, 0xe7, 0x21, + 0x1d, 0x89, 0x61, 0x0b, 0xe2, 0xae, 0x19, 0x5e, 0xf3, 0x71, 0xab, 0x60, 
0x6d, 0xb6, 0xa3, 0x20, + 0xa4, 0x13, 0xe2, 0xe1, 0x90, 0x2f, 0x6b, 0xb5, 0xac, 0x54, 0xf4, 0xee, 0xa3, 0x58, 0x3a, 0x8e, + 0x15, 0x4b, 0x74, 0x3c, 0xa3, 0xa3, 0x20, 0xc4, 0x51, 0x44, 0xa8, 0xcf, 0xad, 0x45, 0x61, 0xd5, + 0x17, 0xa8, 0xe5, 0xed, 0x7d, 0x4e, 0x81, 0xd5, 0x36, 0xf1, 0xd1, 0x90, 0x5c, 0x61, 0x4f, 0xe6, + 0xe5, 0xef, 0x39, 0x29, 0x80, 0x64, 0x9c, 0x0f, 0xb5, 0x9b, 0x24, 0x77, 0x73, 0x93, 0xfa, 0x57, + 0x6e, 0xd4, 0x3b, 0xb9, 0xd9, 0x01, 0x40, 0x4c, 0x4a, 0x7c, 0x0f, 0x5f, 0xc6, 0x99, 0xd0, 0x38, + 0xb1, 0x38, 0xe0, 0x1f, 0x82, 0xd1, 0x58, 0x94, 0x89, 0xc8, 0x30, 0x2a, 0xa5, 0xff, 0x18, 0x87, + 0x5d, 0x90, 0x3f, 0x9f, 0x7d, 0x8b, 0xdf, 0x81, 0xc8, 0xcd, 0x59, 0x9d, 0x2d, 0x6d, 0x28, 0xf7, + 0xa0, 0x0d, 0xe5, 0x1f, 0xbe, 0x21, 0xfd, 0x0f, 0x1b, 0x7a, 0xfe, 0x4d, 0x01, 0x79, 0xb9, 0x18, + 0x9b, 0x21, 0x36, 0x8e, 0xe0, 0x0e, 0xd8, 0x6a, 0x38, 0x9d, 0xe6, 0x61, 0xcb, 0xb5, 0x4f, 0xea, + 0x27, 0x8e, 0xed, 0x3a, 0x1d, 0xfb, 0xb8, 0xf5, 0xca, 0x6a, 0x5b, 0xad, 0x66, 0x31, 0x01, 0x37, + 0xc1, 0xda, 0xb2, 0x7c, 0x5a, 0x3f, 0xb4, 0x9a, 0x45, 0x05, 0x6e, 0x81, 0x8d, 0x65, 0xc1, 0xea, + 0x48, 0x29, 0x09, 0x4b, 0xe0, 0xf1, 0xb2, 0xd4, 0x39, 0x72, 0xdb, 0x4e, 0xa7, 0x69, 0x17, 0x53, + 0x70, 0x1b, 0x6c, 0xde, 0xd3, 0xde, 0x3a, 0x47, 0x5d, 0xe7, 0x4d, 0x51, 0xbd, 0x7f, 0xb0, 0x69, + 0xd9, 0xf5, 0xc6, 0x61, 0xab, 0x59, 0x5c, 0x29, 0xa9, 0x1f, 0xbf, 0x98, 0x89, 0x46, 0xfb, 0xfb, + 0x8d, 0xa9, 0x5c, 0xdf, 0x98, 0xca, 0xcf, 0x1b, 0x53, 0xf9, 0x74, 0x6b, 0x26, 0xae, 0x6f, 0xcd, + 0xc4, 0x8f, 0x5b, 0x33, 0xf1, 0xfe, 0x45, 0x9f, 0xb0, 0xc1, 0xb8, 0x57, 0x3d, 0xa3, 0xa3, 0xda, + 0xc1, 0xbb, 0xd3, 0x56, 0x07, 0xb3, 0x0f, 0x34, 0xbc, 0xa8, 0x9d, 0x0d, 0x10, 0xf1, 0x6b, 0x97, + 0xf3, 0xb7, 0x90, 0x4d, 0x03, 0x1c, 0xf5, 0xd2, 0xe2, 0x59, 0x7b, 0xf9, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0x41, 0x08, 0xc6, 0x2f, 0x28, 0x05, 0x00, 0x00, +} + +func (m *BundleProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BundleProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BundleProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.CompressionId)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.StorageProviderId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x78 + } + if len(m.FromKey) > 0 { + i -= len(m.FromKey) + copy(dAtA[i:], m.FromKey) + i = encodeVarintBundles(dAtA, i, uint64(len(m.FromKey))) + i-- + dAtA[i] = 0x72 + } + if len(m.VotersAbstain) > 0 { + for iNdEx := len(m.VotersAbstain) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VotersAbstain[iNdEx]) + copy(dAtA[i:], m.VotersAbstain[iNdEx]) + i = encodeVarintBundles(dAtA, i, uint64(len(m.VotersAbstain[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if len(m.VotersInvalid) > 0 { + for iNdEx := len(m.VotersInvalid) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VotersInvalid[iNdEx]) + copy(dAtA[i:], m.VotersInvalid[iNdEx]) + i = encodeVarintBundles(dAtA, i, uint64(len(m.VotersInvalid[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } + if len(m.VotersValid) > 0 { + for iNdEx := len(m.VotersValid) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VotersValid[iNdEx]) + copy(dAtA[i:], m.VotersValid[iNdEx]) + i = encodeVarintBundles(dAtA, i, uint64(len(m.VotersValid[iNdEx]))) + i-- + dAtA[i] = 0x5a 
+ } + } + if m.UpdatedAt != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.UpdatedAt)) + i-- + dAtA[i] = 0x50 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintBundles(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x4a + } + if len(m.BundleSummary) > 0 { + i -= len(m.BundleSummary) + copy(dAtA[i:], m.BundleSummary) + i = encodeVarintBundles(dAtA, i, uint64(len(m.BundleSummary))) + i-- + dAtA[i] = 0x42 + } + if len(m.ToKey) > 0 { + i -= len(m.ToKey) + copy(dAtA[i:], m.ToKey) + i = encodeVarintBundles(dAtA, i, uint64(len(m.ToKey))) + i-- + dAtA[i] = 0x3a + } + if m.BundleSize != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.BundleSize)) + i-- + dAtA[i] = 0x30 + } + if m.DataSize != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.DataSize)) + i-- + dAtA[i] = 0x28 + } + if len(m.NextUploader) > 0 { + i -= len(m.NextUploader) + copy(dAtA[i:], m.NextUploader) + i = encodeVarintBundles(dAtA, i, uint64(len(m.NextUploader))) + i-- + dAtA[i] = 0x22 + } + if len(m.Uploader) > 0 { + i -= len(m.Uploader) + copy(dAtA[i:], m.Uploader) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Uploader))) + i-- + dAtA[i] = 0x1a + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintBundles(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FinalizedBundle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FinalizedBundle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FinalizedBundle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.CompressionId)) + i-- + dAtA[i] = 0x68 + } + if m.StorageProviderId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x60 + } + if len(m.FromKey) > 0 { + i -= len(m.FromKey) + copy(dAtA[i:], m.FromKey) + i = encodeVarintBundles(dAtA, i, uint64(len(m.FromKey))) + i-- + dAtA[i] = 0x5a + } + if m.FinalizedAt != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.FinalizedAt)) + i-- + dAtA[i] = 0x50 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintBundles(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x4a + } + if len(m.BundleSummary) > 0 { + i -= len(m.BundleSummary) + copy(dAtA[i:], m.BundleSummary) + i = encodeVarintBundles(dAtA, i, uint64(len(m.BundleSummary))) + i-- + dAtA[i] = 0x42 + } + if len(m.ToKey) > 0 { + i -= len(m.ToKey) + copy(dAtA[i:], m.ToKey) + i = encodeVarintBundles(dAtA, i, uint64(len(m.ToKey))) + i-- + dAtA[i] = 0x3a + } + if m.ToIndex != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.ToIndex)) + i-- + dAtA[i] = 0x30 + } + if m.FromIndex != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.FromIndex)) + i-- + dAtA[i] = 0x28 + } + if len(m.Uploader) > 0 { + i -= len(m.Uploader) + copy(dAtA[i:], m.Uploader) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Uploader))) + i-- + dAtA[i] = 0x22 + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintBundles(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x1a + } + if m.Id != 0 { + i = 
encodeVarintBundles(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintBundles(dAtA []byte, offset int, v uint64) int { + offset -= sovBundles(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BundleProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.Uploader) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.NextUploader) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.DataSize != 0 { + n += 1 + sovBundles(uint64(m.DataSize)) + } + if m.BundleSize != 0 { + n += 1 + sovBundles(uint64(m.BundleSize)) + } + l = len(m.ToKey) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.BundleSummary) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.UpdatedAt != 0 { + n += 1 + sovBundles(uint64(m.UpdatedAt)) + } + if len(m.VotersValid) > 0 { + for _, s := range m.VotersValid { + l = len(s) + n += 1 + l + sovBundles(uint64(l)) + } + } + if len(m.VotersInvalid) > 0 { + for _, s := range m.VotersInvalid { + l = len(s) + n += 1 + l + sovBundles(uint64(l)) + } + } + if len(m.VotersAbstain) > 0 { + for _, s := range m.VotersAbstain { + l = len(s) + n += 1 + l + sovBundles(uint64(l)) + } + } + l = len(m.FromKey) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.StorageProviderId != 0 { + n += 1 + sovBundles(uint64(m.StorageProviderId)) + } + if m.CompressionId != 0 { + n += 2 + sovBundles(uint64(m.CompressionId)) + } + return n +} + +func (m *FinalizedBundle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovBundles(uint64(m.Id)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.Uploader) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.FromIndex != 0 { + n += 1 + sovBundles(uint64(m.FromIndex)) + } + if m.ToIndex != 0 { + n += 1 + sovBundles(uint64(m.ToIndex)) + } + l = len(m.ToKey) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.BundleSummary) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.FinalizedAt != 0 { + n += 1 + sovBundles(uint64(m.FinalizedAt)) + } + l = len(m.FromKey) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.StorageProviderId != 0 { + n += 1 + sovBundles(uint64(m.StorageProviderId)) + } + if m.CompressionId != 0 { + n += 1 + sovBundles(uint64(m.CompressionId)) + } + return n +} + +func sovBundles(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBundles(x uint64) (n int) { + return sovBundles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BundleProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BundleProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BundleProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextUploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextUploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSize", wireType) + } + m.DataSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSize", wireType) + } + m.BundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ToKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSummary", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BundleSummary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + m.UpdatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VotersValid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VotersValid = append(m.VotersValid, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VotersInvalid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VotersInvalid = append(m.VotersInvalid, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VotersAbstain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VotersAbstain = append(m.VotersAbstain, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressionId", wireType) + } + m.CompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FinalizedBundle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FinalizedBundle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FinalizedBundle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIndex", wireType) + } + m.FromIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ToIndex", wireType) + } + m.ToIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ToIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field BundleSummary", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BundleSummary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedAt", wireType) + } + m.FinalizedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FinalizedAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressionId", wireType) + } + m.CompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
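	// Every field was either decoded into its struct member or skipped as an
	// unknown field, and the read offset ended exactly at the end of the
	// buffer, so the FinalizedBundle is fully unmarshaled.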
return nil +} +func skipBundles(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBundles + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBundles + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBundles + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBundles = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBundles = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBundles = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/codec.go b/x/bundles/types/codec.go new file mode 100644 index 00000000..c04e0578 --- /dev/null +++ b/x/bundles/types/codec.go @@ -0,0 +1,22 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgSubmitBundleProposal{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgVoteBundleProposal{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgClaimUploaderRole{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgSkipUploaderRole{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateParams{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/bundles/types/errors.go b/x/bundles/types/errors.go new file mode 100644 index 00000000..ec5a341f --- /dev/null +++ b/x/bundles/types/errors.go @@ -0,0 +1,26 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// x/bundles module sentinel errors +var ( + ErrUploaderAlreadyClaimed = sdkerrors.Register(ModuleName, 1100, "uploader role already claimed") + ErrInvalidArgs = sdkerrors.Register(ModuleName, 1107, "invalid args") + ErrFromIndex = sdkerrors.Register(ModuleName, 1118, "invalid from index") + ErrNotDesignatedUploader = sdkerrors.Register(ModuleName, 1113, "not designated uploader") + ErrUploadInterval = sdkerrors.Register(ModuleName, 1108, "upload interval not surpassed") + ErrMaxBundleSize = sdkerrors.Register(ModuleName, 1109, "max bundle size was surpassed") + ErrQuorumNotReached = sdkerrors.Register(ModuleName, 1111, "no quorum reached") + ErrInvalidVote = sdkerrors.Register(ModuleName, 1119, "invalid 
vote %v") + ErrInvalidStorageId = sdkerrors.Register(ModuleName, 1120, "current storageId %v does not match provided storageId") + ErrPoolDisabled = sdkerrors.Register(ModuleName, 1121, "pool is disabled") + ErrPoolCurrentlyUpgrading = sdkerrors.Register(ModuleName, 1122, "pool currently upgrading") + ErrMinDelegationNotReached = sdkerrors.Register(ModuleName, 1200, "min delegation not reached") + ErrPoolOutOfFunds = sdkerrors.Register(ModuleName, 1201, "pool is out of funds") + ErrBundleDropped = sdkerrors.Register(ModuleName, 1202, "bundle proposal is dropped") + ErrAlreadyVotedValid = sdkerrors.Register(ModuleName, 1204, "already voted valid on bundle proposal") + ErrAlreadyVotedInvalid = sdkerrors.Register(ModuleName, 1205, "already voted invalid on bundle proposal") + ErrAlreadyVotedAbstain = sdkerrors.Register(ModuleName, 1206, "already voted abstain on bundle proposal") +) diff --git a/x/bundles/types/events.pb.go b/x/bundles/types/events.pb.go new file mode 100644 index 00000000..e5a803ee --- /dev/null +++ b/x/bundles/types/events.pb.go @@ -0,0 +1,2859 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/bundles/v1beta1/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventBundleVote is an event emitted when a protocol node votes on a bundle. +// emitted_by: MsgVoteBundleProposal +type EventBundleVote struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the account staker of the protocol node. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // storage_id is the unique ID of the bundle. 
+ StorageId string `protobuf:"bytes,3,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // vote is the vote cast by the validator + Vote VoteType `protobuf:"varint,4,opt,name=vote,proto3,enum=kyve.bundles.v1beta1.VoteType" json:"vote,omitempty"` +} + +func (m *EventBundleVote) Reset() { *m = EventBundleVote{} } +func (m *EventBundleVote) String() string { return proto.CompactTextString(m) } +func (*EventBundleVote) ProtoMessage() {} +func (*EventBundleVote) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{0} +} +func (m *EventBundleVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventBundleVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventBundleVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventBundleVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventBundleVote.Merge(m, src) +} +func (m *EventBundleVote) XXX_Size() int { + return m.Size() +} +func (m *EventBundleVote) XXX_DiscardUnknown() { + xxx_messageInfo_EventBundleVote.DiscardUnknown(m) +} + +var xxx_messageInfo_EventBundleVote proto.InternalMessageInfo + +func (m *EventBundleVote) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventBundleVote) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventBundleVote) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +func (m *EventBundleVote) GetVote() VoteType { + if m != nil { + return m.Vote + } + return VOTE_TYPE_UNSPECIFIED +} + +// EventBundleProposed is an event emitted when a bundle proposal is submitted. +// emitted_by: MsgSubmitBundleProposal +type EventBundleProposed struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // internal id for the KYVE-bundle + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // storage_id is the ID used to retrieve the data item from the configured storage provider + // e.g. 
the ARWEAVE-id + StorageId string `protobuf:"bytes,3,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // Address of the uploader/proposer of the bundle + Uploader string `protobuf:"bytes,4,opt,name=uploader,proto3" json:"uploader,omitempty"` + // data_size size in bytes of the data + DataSize uint64 `protobuf:"varint,5,opt,name=data_size,json=dataSize,proto3" json:"data_size,omitempty"` + // from_index starting index of the bundle (inclusive) + FromIndex uint64 `protobuf:"varint,6,opt,name=from_index,json=fromIndex,proto3" json:"from_index,omitempty"` + // bundle_size amount of data items in the bundle + BundleSize uint64 `protobuf:"varint,7,opt,name=bundle_size,json=bundleSize,proto3" json:"bundle_size,omitempty"` + // from_key the key of the first data item in the bundle + FromKey string `protobuf:"bytes,8,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` + // to_key the key of the last data item in the bundle + ToKey string `protobuf:"bytes,9,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` + // bundle_summary is a short string holding some useful information of + // the bundle which will get stored on-chain + BundleSummary string `protobuf:"bytes,10,opt,name=bundle_summary,json=bundleSummary,proto3" json:"bundle_summary,omitempty"` + // data_hash is a sha256 hash of the raw compressed data + DataHash string `protobuf:"bytes,11,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // proposed_at the unix time when the bundle was proposed + ProposedAt uint64 `protobuf:"varint,12,opt,name=proposed_at,json=proposedAt,proto3" json:"proposed_at,omitempty"` + // storage_provider_id the unique id of the storage provider where + // the data of the bundle is stored + StorageProviderId uint32 `protobuf:"varint,13,opt,name=storage_provider_id,json=storageProviderId,proto3" json:"storage_provider_id,omitempty"` + // compression_id the unique id of the compression type the data + // of the bundle was compressed with + CompressionId uint32 `protobuf:"varint,14,opt,name=compression_id,json=compressionId,proto3" json:"compression_id,omitempty"` +} + +func (m *EventBundleProposed) Reset() { *m = EventBundleProposed{} } +func (m *EventBundleProposed) String() string { return proto.CompactTextString(m) } +func (*EventBundleProposed) ProtoMessage() {} +func (*EventBundleProposed) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{1} +} +func (m *EventBundleProposed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventBundleProposed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventBundleProposed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventBundleProposed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventBundleProposed.Merge(m, src) +} +func (m *EventBundleProposed) XXX_Size() int { + return m.Size() +} +func (m *EventBundleProposed) XXX_DiscardUnknown() { + xxx_messageInfo_EventBundleProposed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventBundleProposed proto.InternalMessageInfo + +func (m *EventBundleProposed) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventBundleProposed) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventBundleProposed) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + 
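// The sketch below is illustrative only and is not part of the generated file
// (it would live in a test or example file, never in a *.pb.go): it uses just
// the EventBundleProposed type, its getters, and the generated
// Marshal/Unmarshal from this file to show how an event round-trips through
// the gogoproto wire format. The concrete field values are hypothetical.
func exampleEventBundleProposedRoundTrip() {
	ev := &EventBundleProposed{
		PoolId:     0,
		Id:         42,                     // hypothetical bundle id
		StorageId:  "example-storage-id",   // hypothetical storage id
		Uploader:   "kyve1exampleuploader", // hypothetical address
		DataSize:   1024,
		FromIndex:  0,
		BundleSize: 100,
	}

	// Marshal allocates a buffer of exactly Size() bytes and fills it.
	bz, err := ev.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal reconstructs an equivalent event from the wire bytes.
	var decoded EventBundleProposed
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}

	fmt.Println(decoded.GetStorageId(), decoded.GetBundleSize())
}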
+func (m *EventBundleProposed) GetUploader() string { + if m != nil { + return m.Uploader + } + return "" +} + +func (m *EventBundleProposed) GetDataSize() uint64 { + if m != nil { + return m.DataSize + } + return 0 +} + +func (m *EventBundleProposed) GetFromIndex() uint64 { + if m != nil { + return m.FromIndex + } + return 0 +} + +func (m *EventBundleProposed) GetBundleSize() uint64 { + if m != nil { + return m.BundleSize + } + return 0 +} + +func (m *EventBundleProposed) GetFromKey() string { + if m != nil { + return m.FromKey + } + return "" +} + +func (m *EventBundleProposed) GetToKey() string { + if m != nil { + return m.ToKey + } + return "" +} + +func (m *EventBundleProposed) GetBundleSummary() string { + if m != nil { + return m.BundleSummary + } + return "" +} + +func (m *EventBundleProposed) GetDataHash() string { + if m != nil { + return m.DataHash + } + return "" +} + +func (m *EventBundleProposed) GetProposedAt() uint64 { + if m != nil { + return m.ProposedAt + } + return 0 +} + +func (m *EventBundleProposed) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +func (m *EventBundleProposed) GetCompressionId() uint32 { + if m != nil { + return m.CompressionId + } + return 0 +} + +// EventBundleFinalized is an event emitted when a bundle is finalised. +// emitted_by: MsgSubmitBundleProposal, EndBlock +type EventBundleFinalized struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // internal id for the KYVE-bundle + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // total voting power which voted for valid + Valid uint64 `protobuf:"varint,3,opt,name=valid,proto3" json:"valid,omitempty"` + // total voting power which voted for invalid + Invalid uint64 `protobuf:"varint,4,opt,name=invalid,proto3" json:"invalid,omitempty"` + // total voting power which voted for abstain + Abstain uint64 `protobuf:"varint,5,opt,name=abstain,proto3" json:"abstain,omitempty"` + // total voting power of the pool + Total uint64 `protobuf:"varint,6,opt,name=total,proto3" json:"total,omitempty"` + // status of the finalized bundle + Status BundleStatus `protobuf:"varint,7,opt,name=status,proto3,enum=kyve.bundles.v1beta1.BundleStatus" json:"status,omitempty"` + // rewards transferred to treasury (in ukyve) + RewardTreasury uint64 `protobuf:"varint,8,opt,name=reward_treasury,json=rewardTreasury,proto3" json:"reward_treasury,omitempty"` + // rewardUploader rewards directly transferred to uploader (in ukyve) + RewardUploader uint64 `protobuf:"varint,9,opt,name=reward_uploader,json=rewardUploader,proto3" json:"reward_uploader,omitempty"` + // rewardDelegation rewards distributed among all delegators (in ukyve) + RewardDelegation uint64 `protobuf:"varint,10,opt,name=reward_delegation,json=rewardDelegation,proto3" json:"reward_delegation,omitempty"` + // rewardTotal the total bundle reward + RewardTotal uint64 `protobuf:"varint,11,opt,name=reward_total,json=rewardTotal,proto3" json:"reward_total,omitempty"` + // finalized_at the block height where the bundle got finalized + FinalizedAt uint64 `protobuf:"varint,12,opt,name=finalized_at,json=finalizedAt,proto3" json:"finalized_at,omitempty"` + // uploader the address of the uploader of this bundle + Uploader string `protobuf:"bytes,13,opt,name=uploader,proto3" json:"uploader,omitempty"` + // next_uploader the address of the next uploader after this bundle + NextUploader string 
`protobuf:"bytes,14,opt,name=next_uploader,json=nextUploader,proto3" json:"next_uploader,omitempty"` +} + +func (m *EventBundleFinalized) Reset() { *m = EventBundleFinalized{} } +func (m *EventBundleFinalized) String() string { return proto.CompactTextString(m) } +func (*EventBundleFinalized) ProtoMessage() {} +func (*EventBundleFinalized) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{2} +} +func (m *EventBundleFinalized) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventBundleFinalized) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventBundleFinalized.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventBundleFinalized) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventBundleFinalized.Merge(m, src) +} +func (m *EventBundleFinalized) XXX_Size() int { + return m.Size() +} +func (m *EventBundleFinalized) XXX_DiscardUnknown() { + xxx_messageInfo_EventBundleFinalized.DiscardUnknown(m) +} + +var xxx_messageInfo_EventBundleFinalized proto.InternalMessageInfo + +func (m *EventBundleFinalized) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventBundleFinalized) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventBundleFinalized) GetValid() uint64 { + if m != nil { + return m.Valid + } + return 0 +} + +func (m *EventBundleFinalized) GetInvalid() uint64 { + if m != nil { + return m.Invalid + } + return 0 +} + +func (m *EventBundleFinalized) GetAbstain() uint64 { + if m != nil { + return m.Abstain + } + return 0 +} + +func (m *EventBundleFinalized) GetTotal() uint64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *EventBundleFinalized) GetStatus() BundleStatus { + if m != nil { + return m.Status + } + return BUNDLE_STATUS_UNSPECIFIED +} + +func (m *EventBundleFinalized) GetRewardTreasury() uint64 { + if m != nil { + return m.RewardTreasury + } + return 0 +} + +func (m *EventBundleFinalized) GetRewardUploader() uint64 { + if m != nil { + return m.RewardUploader + } + return 0 +} + +func (m *EventBundleFinalized) GetRewardDelegation() uint64 { + if m != nil { + return m.RewardDelegation + } + return 0 +} + +func (m *EventBundleFinalized) GetRewardTotal() uint64 { + if m != nil { + return m.RewardTotal + } + return 0 +} + +func (m *EventBundleFinalized) GetFinalizedAt() uint64 { + if m != nil { + return m.FinalizedAt + } + return 0 +} + +func (m *EventBundleFinalized) GetUploader() string { + if m != nil { + return m.Uploader + } + return "" +} + +func (m *EventBundleFinalized) GetNextUploader() string { + if m != nil { + return m.NextUploader + } + return "" +} + +// EventClaimedUploaderRole is an event emitted when an uploader claims the uploader role +// emitted_by: MsgClaimUploaderRole +type EventClaimedUploaderRole struct { + // pool_id is the unique ID of the pool. 
+ PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // id internal id for the KYVE-bundle + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // new_uploader the address of the participant who claimed + // the free uploader role + NewUploader string `protobuf:"bytes,3,opt,name=new_uploader,json=newUploader,proto3" json:"new_uploader,omitempty"` +} + +func (m *EventClaimedUploaderRole) Reset() { *m = EventClaimedUploaderRole{} } +func (m *EventClaimedUploaderRole) String() string { return proto.CompactTextString(m) } +func (*EventClaimedUploaderRole) ProtoMessage() {} +func (*EventClaimedUploaderRole) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{3} +} +func (m *EventClaimedUploaderRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventClaimedUploaderRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventClaimedUploaderRole.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventClaimedUploaderRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventClaimedUploaderRole.Merge(m, src) +} +func (m *EventClaimedUploaderRole) XXX_Size() int { + return m.Size() +} +func (m *EventClaimedUploaderRole) XXX_DiscardUnknown() { + xxx_messageInfo_EventClaimedUploaderRole.DiscardUnknown(m) +} + +var xxx_messageInfo_EventClaimedUploaderRole proto.InternalMessageInfo + +func (m *EventClaimedUploaderRole) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventClaimedUploaderRole) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventClaimedUploaderRole) GetNewUploader() string { + if m != nil { + return m.NewUploader + } + return "" +} + +// EventSkippedUploaderRole is an event emitted when an uploader skips the upload +// emitted_by: MsgSkipUploaderRole +type EventSkippedUploaderRole struct { + // pool_id is the unique ID of the pool. 
+ PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // id internal id for the KYVE-bundle + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // previous_uploader is the address of the staker who skipped his uploader role + PreviousUploader string `protobuf:"bytes,3,opt,name=previous_uploader,json=previousUploader,proto3" json:"previous_uploader,omitempty"` + // new_uploader is the address of the new uploader who got automatically selected + NewUploader string `protobuf:"bytes,4,opt,name=new_uploader,json=newUploader,proto3" json:"new_uploader,omitempty"` +} + +func (m *EventSkippedUploaderRole) Reset() { *m = EventSkippedUploaderRole{} } +func (m *EventSkippedUploaderRole) String() string { return proto.CompactTextString(m) } +func (*EventSkippedUploaderRole) ProtoMessage() {} +func (*EventSkippedUploaderRole) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{4} +} +func (m *EventSkippedUploaderRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSkippedUploaderRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventSkippedUploaderRole.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventSkippedUploaderRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSkippedUploaderRole.Merge(m, src) +} +func (m *EventSkippedUploaderRole) XXX_Size() int { + return m.Size() +} +func (m *EventSkippedUploaderRole) XXX_DiscardUnknown() { + xxx_messageInfo_EventSkippedUploaderRole.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSkippedUploaderRole proto.InternalMessageInfo + +func (m *EventSkippedUploaderRole) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventSkippedUploaderRole) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventSkippedUploaderRole) GetPreviousUploader() string { + if m != nil { + return m.PreviousUploader + } + return "" +} + +func (m *EventSkippedUploaderRole) GetNewUploader() string { + if m != nil { + return m.NewUploader + } + return "" +} + +// EventPointIncreased is an event emitted when a staker receives a point +// emitted_by: MsgSubmitBundleProposal, EndBlock +type EventPointIncreased struct { + // pool_id is the unique ID of the pool. 
+ PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the address of the staker who received the point + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // current_points is the amount of points the staker has now + CurrentPoints uint64 `protobuf:"varint,3,opt,name=current_points,json=currentPoints,proto3" json:"current_points,omitempty"` +} + +func (m *EventPointIncreased) Reset() { *m = EventPointIncreased{} } +func (m *EventPointIncreased) String() string { return proto.CompactTextString(m) } +func (*EventPointIncreased) ProtoMessage() {} +func (*EventPointIncreased) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{5} +} +func (m *EventPointIncreased) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventPointIncreased) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventPointIncreased.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventPointIncreased) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventPointIncreased.Merge(m, src) +} +func (m *EventPointIncreased) XXX_Size() int { + return m.Size() +} +func (m *EventPointIncreased) XXX_DiscardUnknown() { + xxx_messageInfo_EventPointIncreased.DiscardUnknown(m) +} + +var xxx_messageInfo_EventPointIncreased proto.InternalMessageInfo + +func (m *EventPointIncreased) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventPointIncreased) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventPointIncreased) GetCurrentPoints() uint64 { + if m != nil { + return m.CurrentPoints + } + return 0 +} + +// EventPointsReset is an event emitted when the points of a staker are reset to zero +// emitted_by: MsgSubmitBundleProposal, EndBlock +type EventPointsReset struct { + // pool_id is the unique ID of the pool. 
+ PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the address of the staker who has zero points now + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *EventPointsReset) Reset() { *m = EventPointsReset{} } +func (m *EventPointsReset) String() string { return proto.CompactTextString(m) } +func (*EventPointsReset) ProtoMessage() {} +func (*EventPointsReset) Descriptor() ([]byte, []int) { + return fileDescriptor_a02f505e55d81e92, []int{6} +} +func (m *EventPointsReset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventPointsReset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventPointsReset.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventPointsReset) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventPointsReset.Merge(m, src) +} +func (m *EventPointsReset) XXX_Size() int { + return m.Size() +} +func (m *EventPointsReset) XXX_DiscardUnknown() { + xxx_messageInfo_EventPointsReset.DiscardUnknown(m) +} + +var xxx_messageInfo_EventPointsReset proto.InternalMessageInfo + +func (m *EventPointsReset) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventPointsReset) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func init() { + proto.RegisterType((*EventBundleVote)(nil), "kyve.bundles.v1beta1.EventBundleVote") + proto.RegisterType((*EventBundleProposed)(nil), "kyve.bundles.v1beta1.EventBundleProposed") + proto.RegisterType((*EventBundleFinalized)(nil), "kyve.bundles.v1beta1.EventBundleFinalized") + proto.RegisterType((*EventClaimedUploaderRole)(nil), "kyve.bundles.v1beta1.EventClaimedUploaderRole") + proto.RegisterType((*EventSkippedUploaderRole)(nil), "kyve.bundles.v1beta1.EventSkippedUploaderRole") + proto.RegisterType((*EventPointIncreased)(nil), "kyve.bundles.v1beta1.EventPointIncreased") + proto.RegisterType((*EventPointsReset)(nil), "kyve.bundles.v1beta1.EventPointsReset") +} + +func init() { proto.RegisterFile("kyve/bundles/v1beta1/events.proto", fileDescriptor_a02f505e55d81e92) } + +var fileDescriptor_a02f505e55d81e92 = []byte{ + // 775 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4b, 0x4f, 0x1b, 0x49, + 0x10, 0x66, 0x60, 0xf0, 0xa3, 0xfd, 0x00, 0x06, 0x76, 0x77, 0x96, 0x15, 0x5e, 0xdb, 0xab, 0xd5, + 0x5a, 0x62, 0x65, 0x0b, 0xf6, 0xb6, 0x37, 0x60, 0x41, 0x6b, 0x21, 0x45, 0x68, 0x20, 0x48, 0xc9, + 0xc5, 0x6a, 0x7b, 0x0a, 0xdc, 0xf2, 0x78, 0x7a, 0xd4, 0xdd, 0xe3, 0x07, 0xbf, 0x22, 0x52, 0x94, + 0x7f, 0x94, 0x43, 0x8e, 0x1c, 0x73, 0x4c, 0xe0, 0x8f, 0x44, 0xfd, 0x98, 0xb1, 0x43, 0x4c, 0x12, + 0x72, 0xac, 0xaf, 0xbe, 0xaa, 0xfa, 0xba, 0xbf, 0xea, 0x19, 0x54, 0x1b, 0x4c, 0x47, 0xd0, 0xea, + 0xc6, 0xa1, 0x1f, 0x00, 0x6f, 0x8d, 0xf6, 0xba, 0x20, 0xf0, 0x5e, 0x0b, 0x46, 0x10, 0x0a, 0xde, + 0x8c, 0x18, 0x15, 0xd4, 0xd9, 0x92, 0x94, 0xa6, 0xa1, 0x34, 0x0d, 0x65, 0xbb, 0xbe, 0xb0, 0x30, + 0x61, 0xa9, 0xca, 0xed, 0x9d, 0x85, 0x1c, 0x31, 0xd1, 0xe9, 0xfa, 0x1b, 0x0b, 0xad, 0x1d, 0xcb, + 0x49, 0x87, 0x8a, 0x71, 0x49, 0x05, 0x38, 0xbf, 0xa0, 0x6c, 0x44, 0x69, 0xd0, 0x21, 0xbe, 0x6b, + 0x55, 0xad, 0x86, 0xed, 0x65, 0x64, 0xd8, 0xf6, 0x9d, 0x9f, 0x51, 0x86, 0x0b, 0x3c, 0x00, 0xe6, + 0x2e, 0x57, 0xad, 0x46, 0xde, 0x33, 0x91, 0xb3, 0x83, 0x10, 0x17, 0x94, 
0xe1, 0x6b, 0x90, 0x35, + 0x2b, 0x2a, 0x97, 0x37, 0x48, 0xdb, 0x77, 0xf6, 0x91, 0x3d, 0xa2, 0x02, 0x5c, 0xbb, 0x6a, 0x35, + 0xca, 0xfb, 0x95, 0xe6, 0xa2, 0xb3, 0x34, 0xe5, 0xe4, 0x8b, 0x69, 0x04, 0x9e, 0xe2, 0xd6, 0xdf, + 0xae, 0xa0, 0xcd, 0x39, 0x5d, 0x67, 0x8c, 0x46, 0x94, 0x83, 0xff, 0xb8, 0xb6, 0x32, 0x5a, 0x26, + 0xbe, 0xd2, 0x65, 0x7b, 0xcb, 0xc4, 0xff, 0x96, 0xa6, 0x6d, 0x94, 0x8b, 0xa3, 0x80, 0x62, 0x1f, + 0x98, 0xd2, 0x95, 0xf7, 0xd2, 0xd8, 0xf9, 0x0d, 0xe5, 0x7d, 0x2c, 0x70, 0x87, 0x93, 0x1b, 0x70, + 0x57, 0x55, 0xc7, 0x9c, 0x04, 0xce, 0xc9, 0x0d, 0xc8, 0xbe, 0x57, 0x8c, 0x0e, 0x3b, 0x24, 0xf4, + 0x61, 0xe2, 0x66, 0x54, 0x36, 0x2f, 0x91, 0xb6, 0x04, 0x9c, 0xdf, 0x51, 0x41, 0x9f, 0x4c, 0x57, + 0x67, 0x55, 0x1e, 0x69, 0x48, 0xd5, 0xff, 0x8a, 0x72, 0xaa, 0x7e, 0x00, 0x53, 0x37, 0xa7, 0x06, + 0x67, 0x65, 0x7c, 0x0a, 0x53, 0xe7, 0x27, 0x94, 0x11, 0x54, 0x25, 0xf2, 0x2a, 0xb1, 0x2a, 0xa8, + 0x84, 0xff, 0x44, 0xe5, 0xa4, 0x65, 0x3c, 0x1c, 0x62, 0x36, 0x75, 0x91, 0x4a, 0x97, 0x4c, 0x57, + 0x0d, 0xa6, 0xaa, 0xfb, 0x98, 0xf7, 0xdd, 0x82, 0x3e, 0x92, 0x04, 0xfe, 0xc7, 0xbc, 0x2f, 0x65, + 0x45, 0xe6, 0x0a, 0x3b, 0x58, 0xb8, 0x45, 0x2d, 0x2b, 0x81, 0x0e, 0x84, 0xd3, 0x44, 0x9b, 0xc9, + 0x75, 0x45, 0x8c, 0x8e, 0x88, 0x0f, 0x4c, 0xde, 0x5b, 0xa9, 0x6a, 0x35, 0x4a, 0xde, 0x86, 0x49, + 0x9d, 0x99, 0x4c, 0xdb, 0x97, 0xa2, 0x7a, 0x74, 0x18, 0x31, 0xe0, 0x9c, 0xd0, 0x50, 0x52, 0xcb, + 0x8a, 0x5a, 0x9a, 0x43, 0xdb, 0x7e, 0xfd, 0xe3, 0x0a, 0xda, 0x9a, 0xb3, 0xf1, 0x84, 0x84, 0x38, + 0x20, 0x37, 0x4f, 0xf1, 0x71, 0x0b, 0xad, 0x8e, 0x70, 0x60, 0x2c, 0xb4, 0x3d, 0x1d, 0x38, 0x2e, + 0xca, 0x92, 0x50, 0xe3, 0xb6, 0xc2, 0x93, 0x50, 0x66, 0x70, 0x97, 0x0b, 0x4c, 0x42, 0x63, 0x5d, + 0x12, 0xca, 0x4e, 0x82, 0x0a, 0x1c, 0x18, 0xd3, 0x74, 0xe0, 0xfc, 0xab, 0x76, 0x5a, 0xc4, 0x5c, + 0x79, 0x55, 0xde, 0xaf, 0x2f, 0x5e, 0x4f, 0xad, 0xff, 0x5c, 0x31, 0x3d, 0x53, 0xe1, 0xfc, 0x85, + 0xd6, 0x18, 0x8c, 0x31, 0xf3, 0x3b, 0x82, 0x01, 0xe6, 0x31, 0xd3, 0x96, 0xda, 0x5e, 0x59, 0xc3, + 0x17, 0x06, 0x9d, 0x23, 0xa6, 0x4b, 0x97, 0x9f, 0x27, 0x3e, 0x4f, 0x56, 0x6f, 0x17, 0x6d, 0x18, + 0xa2, 0x0f, 0x01, 0x5c, 0x63, 0x41, 0x68, 0xa8, 0xec, 0xb6, 0xbd, 0x75, 0x9d, 0xf8, 0x2f, 0xc5, + 0x9d, 0x1a, 0x2a, 0x26, 0xe3, 0xd5, 0xb9, 0x0a, 0x8a, 0x57, 0x30, 0xb3, 0xd5, 0xe9, 0x6a, 0xa8, + 0x78, 0x95, 0xdc, 0xf9, 0xcc, 0xf8, 0x42, 0x8a, 0x1d, 0x88, 0xcf, 0x5e, 0x42, 0xe9, 0xc1, 0x4b, + 0xf8, 0x03, 0x95, 0x42, 0x98, 0x88, 0x99, 0xea, 0xb2, 0x22, 0x14, 0x25, 0x98, 0x68, 0xae, 0x5f, + 0x21, 0x57, 0x59, 0x7c, 0x14, 0x60, 0x32, 0x84, 0xf4, 0x2c, 0x1e, 0x0d, 0xe0, 0xfb, 0x6d, 0xae, + 0xa1, 0x62, 0x08, 0xe3, 0xd9, 0x20, 0xfd, 0x60, 0x0b, 0x21, 0x8c, 0xd3, 0x39, 0xaf, 0x2d, 0x33, + 0xe8, 0x7c, 0x40, 0xa2, 0xe8, 0x47, 0x07, 0xed, 0xa2, 0x8d, 0x88, 0xc1, 0x88, 0xd0, 0x98, 0x3f, + 0x9c, 0xb6, 0x9e, 0x24, 0x52, 0x3b, 0x1e, 0xaa, 0xb2, 0xbf, 0x54, 0x35, 0x34, 0xdf, 0xa9, 0x33, + 0x4a, 0x42, 0xd1, 0x0e, 0x7b, 0xd2, 0xf2, 0xaf, 0xed, 0xf7, 0x63, 0xdf, 0x50, 0xf9, 0xa0, 0x62, + 0xc6, 0x20, 0x14, 0x9d, 0x48, 0xb6, 0xe2, 0x66, 0xe1, 0x4b, 0x06, 0x55, 0xfd, 0x79, 0xfd, 0x08, + 0xad, 0xcf, 0xc6, 0x71, 0x0f, 0x38, 0x88, 0x27, 0xcf, 0x3a, 0x3c, 0x79, 0x77, 0x57, 0xb1, 0x6e, + 0xef, 0x2a, 0xd6, 0x87, 0xbb, 0x8a, 0xf5, 0xea, 0xbe, 0xb2, 0x74, 0x7b, 0x5f, 0x59, 0x7a, 0x7f, + 0x5f, 0x59, 0x7a, 0xf9, 0xf7, 0x35, 0x11, 0xfd, 0xb8, 0xdb, 0xec, 0xd1, 0x61, 0xeb, 0xf4, 0xc5, + 0xe5, 0xf1, 0x33, 0x10, 0x63, 0xca, 0x06, 0xad, 0x5e, 0x1f, 0x93, 0xb0, 0x35, 0x49, 0xff, 0x23, + 0x62, 0x1a, 0x01, 0xef, 0x66, 0xd4, 0x3f, 0xe4, 0x9f, 0x4f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, + 
0xe3, 0xf5, 0xed, 0xc1, 0x06, 0x00, 0x00, +} + +func (m *EventBundleVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventBundleVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventBundleVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Vote)) + i-- + dAtA[i] = 0x20 + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventBundleProposed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventBundleProposed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventBundleProposed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CompressionId)) + i-- + dAtA[i] = 0x70 + } + if m.StorageProviderId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x68 + } + if m.ProposedAt != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.ProposedAt)) + i-- + dAtA[i] = 0x60 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintEvents(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x5a + } + if len(m.BundleSummary) > 0 { + i -= len(m.BundleSummary) + copy(dAtA[i:], m.BundleSummary) + i = encodeVarintEvents(dAtA, i, uint64(len(m.BundleSummary))) + i-- + dAtA[i] = 0x52 + } + if len(m.ToKey) > 0 { + i -= len(m.ToKey) + copy(dAtA[i:], m.ToKey) + i = encodeVarintEvents(dAtA, i, uint64(len(m.ToKey))) + i-- + dAtA[i] = 0x4a + } + if len(m.FromKey) > 0 { + i -= len(m.FromKey) + copy(dAtA[i:], m.FromKey) + i = encodeVarintEvents(dAtA, i, uint64(len(m.FromKey))) + i-- + dAtA[i] = 0x42 + } + if m.BundleSize != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.BundleSize)) + i-- + dAtA[i] = 0x38 + } + if m.FromIndex != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.FromIndex)) + i-- + dAtA[i] = 0x30 + } + if m.DataSize != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.DataSize)) + i-- + dAtA[i] = 0x28 + } + if len(m.Uploader) > 0 { + i -= len(m.Uploader) + copy(dAtA[i:], m.Uploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Uploader))) + i-- + dAtA[i] = 0x22 + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintEvents(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x1a + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventBundleFinalized) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
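	// Size computed the exact encoded length, so the single allocation below
	// is sufficient and MarshalToSizedBuffer can fill it back-to-front.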
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventBundleFinalized) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventBundleFinalized) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NextUploader) > 0 { + i -= len(m.NextUploader) + copy(dAtA[i:], m.NextUploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.NextUploader))) + i-- + dAtA[i] = 0x72 + } + if len(m.Uploader) > 0 { + i -= len(m.Uploader) + copy(dAtA[i:], m.Uploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Uploader))) + i-- + dAtA[i] = 0x6a + } + if m.FinalizedAt != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.FinalizedAt)) + i-- + dAtA[i] = 0x60 + } + if m.RewardTotal != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.RewardTotal)) + i-- + dAtA[i] = 0x58 + } + if m.RewardDelegation != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.RewardDelegation)) + i-- + dAtA[i] = 0x50 + } + if m.RewardUploader != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.RewardUploader)) + i-- + dAtA[i] = 0x48 + } + if m.RewardTreasury != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.RewardTreasury)) + i-- + dAtA[i] = 0x40 + } + if m.Status != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x38 + } + if m.Total != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x30 + } + if m.Abstain != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Abstain)) + i-- + dAtA[i] = 0x28 + } + if m.Invalid != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Invalid)) + i-- + dAtA[i] = 0x20 + } + if m.Valid != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Valid)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventClaimedUploaderRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventClaimedUploaderRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventClaimedUploaderRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewUploader) > 0 { + i -= len(m.NewUploader) + copy(dAtA[i:], m.NewUploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.NewUploader))) + i-- + dAtA[i] = 0x1a + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventSkippedUploaderRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSkippedUploaderRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSkippedUploaderRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NewUploader) > 0 { + i -= len(m.NewUploader) + copy(dAtA[i:], 
m.NewUploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.NewUploader))) + i-- + dAtA[i] = 0x22 + } + if len(m.PreviousUploader) > 0 { + i -= len(m.PreviousUploader) + copy(dAtA[i:], m.PreviousUploader) + i = encodeVarintEvents(dAtA, i, uint64(len(m.PreviousUploader))) + i-- + dAtA[i] = 0x1a + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventPointIncreased) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventPointIncreased) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventPointIncreased) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentPoints != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CurrentPoints)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventPointsReset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventPointsReset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventPointsReset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventBundleVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Vote != 0 { + n += 1 + sovEvents(uint64(m.Vote)) + } + return n +} + +func (m *EventBundleProposed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Uploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.DataSize != 0 { + n += 1 + sovEvents(uint64(m.DataSize)) + } + if m.FromIndex != 0 { + n += 1 + sovEvents(uint64(m.FromIndex)) + } + if m.BundleSize != 0 { + n += 1 + sovEvents(uint64(m.BundleSize)) + } + l = len(m.FromKey) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ToKey) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = 
len(m.BundleSummary) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.ProposedAt != 0 { + n += 1 + sovEvents(uint64(m.ProposedAt)) + } + if m.StorageProviderId != 0 { + n += 1 + sovEvents(uint64(m.StorageProviderId)) + } + if m.CompressionId != 0 { + n += 1 + sovEvents(uint64(m.CompressionId)) + } + return n +} + +func (m *EventBundleFinalized) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + if m.Valid != 0 { + n += 1 + sovEvents(uint64(m.Valid)) + } + if m.Invalid != 0 { + n += 1 + sovEvents(uint64(m.Invalid)) + } + if m.Abstain != 0 { + n += 1 + sovEvents(uint64(m.Abstain)) + } + if m.Total != 0 { + n += 1 + sovEvents(uint64(m.Total)) + } + if m.Status != 0 { + n += 1 + sovEvents(uint64(m.Status)) + } + if m.RewardTreasury != 0 { + n += 1 + sovEvents(uint64(m.RewardTreasury)) + } + if m.RewardUploader != 0 { + n += 1 + sovEvents(uint64(m.RewardUploader)) + } + if m.RewardDelegation != 0 { + n += 1 + sovEvents(uint64(m.RewardDelegation)) + } + if m.RewardTotal != 0 { + n += 1 + sovEvents(uint64(m.RewardTotal)) + } + if m.FinalizedAt != 0 { + n += 1 + sovEvents(uint64(m.FinalizedAt)) + } + l = len(m.Uploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.NextUploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventClaimedUploaderRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + l = len(m.NewUploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventSkippedUploaderRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + l = len(m.PreviousUploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.NewUploader) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventPointIncreased) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.CurrentPoints != 0 { + n += 1 + sovEvents(uint64(m.CurrentPoints)) + } + return n +} + +func (m *EventPointsReset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventBundleVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBundleVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + 
return fmt.Errorf("proto: EventBundleVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= VoteType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBundleProposed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBundleProposed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBundleProposed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSize", wireType) + } + m.DataSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIndex", wireType) + } + m.FromIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSize", wireType) + } + m.BundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.FromKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSummary", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BundleSummary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposedAt", wireType) + } + m.ProposedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProposedAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressionId", wireType) + } + m.CompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBundleFinalized) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBundleFinalized: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBundleFinalized: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Valid", wireType) + } + m.Valid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Valid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Invalid", wireType) + } + m.Invalid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Invalid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Abstain", wireType) + } + m.Abstain = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Abstain |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= BundleStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardTreasury", wireType) + } + m.RewardTreasury = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardTreasury |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardUploader", wireType) + } + m.RewardUploader = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardUploader |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardDelegation", wireType) + } + m.RewardDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardTotal", wireType) + } + m.RewardTotal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardTotal |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedAt", wireType) + } + m.FinalizedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FinalizedAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextUploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextUploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventClaimedUploaderRole) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventClaimedUploaderRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventClaimedUploaderRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewUploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewUploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSkippedUploaderRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSkippedUploaderRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSkippedUploaderRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousUploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreviousUploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NewUploader", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NewUploader = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventPointIncreased) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventPointIncreased: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventPointIncreased: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentPoints", wireType) + } + m.CurrentPoints = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentPoints |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventPointsReset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventPointsReset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventPointsReset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/expected_keepers.go b/x/bundles/types/expected_keepers.go new file mode 100644 index 00000000..1b488f68 --- /dev/null +++ b/x/bundles/types/expected_keepers.go @@ -0,0 +1,61 @@ +package types + +import ( + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetModuleAddress(moduleName string) sdk.AccAddress +} + +type DistrKeeper interface { + FundCommunityPool(ctx sdk.Context, amount sdk.Coins, sender sdk.AccAddress) error +} + +// BankKeeper defines the expected interface needed to retrieve account balances. 
+type BankKeeper interface { + SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToModule(ctx sdk.Context, senderModule, recipientModule string, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error +} + +type UpgradeKeeper interface { + ScheduleUpgrade(ctx sdk.Context, plan types.Plan) error +} + +type PoolKeeper interface { + AssertPoolExists(ctx sdk.Context, poolId uint64) error + GetPoolWithError(ctx sdk.Context, poolId uint64) (pooltypes.Pool, error) + GetPool(ctx sdk.Context, id uint64) (val pooltypes.Pool, found bool) + + IncrementBundleInformation(ctx sdk.Context, poolId uint64, currentHeight uint64, currentKey string, currentValue string) + + GetAllPools(ctx sdk.Context) (list []pooltypes.Pool) + ChargeFundersOfPool(ctx sdk.Context, poolId uint64, amount uint64) error +} + +type StakerKeeper interface { + GetAllStakerAddressesOfPool(ctx sdk.Context, poolId uint64) (stakers []string) + GetCommission(ctx sdk.Context, stakerAddress string) sdk.Dec + AssertValaccountAuthorized(ctx sdk.Context, poolId uint64, stakerAddress string, valaddress string) error + + DoesStakerExist(ctx sdk.Context, staker string) bool + DoesValaccountExist(ctx sdk.Context, poolId uint64, stakerAddress string) bool + + LeavePool(ctx sdk.Context, staker string, poolId uint64) + + IncrementPoints(ctx sdk.Context, poolId uint64, stakerAddress string) (newPoints uint64) + ResetPoints(ctx sdk.Context, poolId uint64, stakerAddress string) (previousPoints uint64) +} + +type DelegationKeeper interface { + GetDelegationAmount(ctx sdk.Context, staker string) uint64 + GetDelegationOfPool(ctx sdk.Context, poolId uint64) uint64 + PayoutRewards(ctx sdk.Context, staker string, amount uint64, payerModuleName string) (success bool) + SlashDelegators(ctx sdk.Context, poolId uint64, staker string, slashType delegationTypes.SlashType) +} diff --git a/x/bundles/types/genesis.go b/x/bundles/types/genesis.go new file mode 100644 index 00000000..499f26e6 --- /dev/null +++ b/x/bundles/types/genesis.go @@ -0,0 +1,52 @@ +package types + +import ( + "fmt" + "sort" +) + +// DefaultGenesis returns the default bundles genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + // this line is used by starport scaffolding # genesis/types/default + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any failure.
+func (gs GenesisState) Validate() error { + // Bundle proposal + bundleProposalKey := make(map[string]struct{}) + + for _, elem := range gs.BundleProposalList { + index := string(BundleProposalKey(elem.PoolId)) + if _, ok := bundleProposalKey[index]; ok { + return fmt.Errorf("duplicated pool-id for bundle proposal %v", elem) + } + bundleProposalKey[index] = struct{}{} + } + + // Finalized bundles + finalizedBundleProposals := make(map[string]struct{}) + previousIndexPerPool := make(map[uint64]uint64) + + sort.Slice(gs.FinalizedBundleList, func(i, j int) bool { + return gs.FinalizedBundleList[i].Id < gs.FinalizedBundleList[j].Id + }) + + for _, elem := range gs.FinalizedBundleList { + index := string(FinalizedBundleKey(elem.PoolId, elem.Id)) + if _, ok := finalizedBundleProposals[index]; ok { + return fmt.Errorf("duplicated index for finalized bundle %v", elem) + } + finalizedBundleProposals[index] = struct{}{} + + if previousIndexPerPool[elem.PoolId] == elem.Id { + previousIndexPerPool[elem.PoolId] += 1 + } else { + return fmt.Errorf("missing finalized bundle %v", elem) + } + } + + return gs.Params.Validate() +} diff --git a/x/bundles/types/genesis.pb.go b/x/bundles/types/genesis.pb.go new file mode 100644 index 00000000..abd26aa3 --- /dev/null +++ b/x/bundles/types/genesis.pb.go @@ -0,0 +1,456 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/bundles/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the bundles module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // bundle_proposal_list ... + BundleProposalList []BundleProposal `protobuf:"bytes,2,rep,name=bundle_proposal_list,json=bundleProposalList,proto3" json:"bundle_proposal_list"` + // finalized_bundle_list ... 
+ FinalizedBundleList []FinalizedBundle `protobuf:"bytes,3,rep,name=finalized_bundle_list,json=finalizedBundleList,proto3" json:"finalized_bundle_list"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_21c07b409d3bb015, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetBundleProposalList() []BundleProposal { + if m != nil { + return m.BundleProposalList + } + return nil +} + +func (m *GenesisState) GetFinalizedBundleList() []FinalizedBundle { + if m != nil { + return m.FinalizedBundleList + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.bundles.v1beta1.GenesisState") +} + +func init() { + proto.RegisterFile("kyve/bundles/v1beta1/genesis.proto", fileDescriptor_21c07b409d3bb015) +} + +var fileDescriptor_21c07b409d3bb015 = []byte{ + // 301 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xca, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0x2a, 0xcd, 0x4b, 0xc9, 0x49, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x01, 0xa9, 0xd1, 0x83, 0xaa, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, + 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0xb0, 0x9b, 0x07, 0xd3, 0x0b, 0x51, 0xa3, 0x88, 0x55, 0x4d, + 0x41, 0x62, 0x51, 0x62, 0x2e, 0x54, 0x89, 0x52, 0x27, 0x13, 0x17, 0x8f, 0x3b, 0xc4, 0x11, 0xc1, + 0x25, 0x89, 0x25, 0xa9, 0x42, 0x56, 0x5c, 0x6c, 0x10, 0x05, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xdc, + 0x46, 0x32, 0x7a, 0xd8, 0x1c, 0xa5, 0x17, 0x00, 0x56, 0xe3, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, + 0x10, 0x54, 0x87, 0x50, 0x0c, 0x97, 0x08, 0x44, 0x5d, 0x7c, 0x41, 0x51, 0x7e, 0x41, 0x7e, 0x71, + 0x62, 0x4e, 0x7c, 0x4e, 0x66, 0x71, 0x89, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x0a, 0x76, + 0x93, 0x9c, 0xc0, 0xfc, 0x00, 0xa8, 0x06, 0xa8, 0x89, 0x42, 0x49, 0x28, 0xa2, 0x3e, 0x99, 0xc5, + 0x25, 0x42, 0xf1, 0x5c, 0xa2, 0x69, 0x99, 0x79, 0x89, 0x39, 0x99, 0x55, 0xa9, 0x29, 0xf1, 0x50, + 0x7b, 0xc0, 0xc6, 0x33, 0x83, 0x8d, 0x57, 0xc5, 0x6e, 0xbc, 0x1b, 0x4c, 0x0b, 0xc4, 0x1e, 0xa8, + 0xf9, 0xc2, 0x69, 0xa8, 0xc2, 0x20, 0x0b, 0x9c, 0xdc, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, + 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, + 0x58, 0x8e, 0x21, 0x4a, 0x27, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0xdf, + 0x3b, 0x32, 0xcc, 0xd5, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x5b, 0x3f, 0x39, 0x23, 0x31, 0x33, + 0x4f, 0xbf, 0x02, 0x1e, 0xc4, 0x25, 0x95, 
0x05, 0xa9, 0xc5, 0x49, 0x6c, 0xe0, 0xa0, 0x35, 0x06, + 0x04, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xed, 0x45, 0xe0, 0xf3, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FinalizedBundleList) > 0 { + for iNdEx := len(m.FinalizedBundleList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FinalizedBundleList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.BundleProposalList) > 0 { + for iNdEx := len(m.BundleProposalList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BundleProposalList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.BundleProposalList) > 0 { + for _, e := range m.BundleProposalList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.FinalizedBundleList) > 0 { + for _, e := range m.FinalizedBundleList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleProposalList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BundleProposalList = append(m.BundleProposalList, BundleProposal{}) + if err := m.BundleProposalList[len(m.BundleProposalList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedBundleList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FinalizedBundleList = append(m.FinalizedBundleList, FinalizedBundle{}) + if err := m.FinalizedBundleList[len(m.FinalizedBundleList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/keys.go b/x/bundles/types/keys.go new file mode 100644 index 00000000..0c5597bf --- /dev/null +++ b/x/bundles/types/keys.go @@ -0,0 +1,45 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +const ( + // ModuleName defines the module name + ModuleName = "bundles" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_bundles" +) + +var ParamsKey = []byte{0x00} + +var ( + // BundleKeyPrefix ... + BundleKeyPrefix = []byte{1} + // FinalizedBundlePrefix ... + FinalizedBundlePrefix = []byte{2} + + FinalizedBundleByHeightPrefix = []byte{11} +) + +// BundleProposalKey ... +func BundleProposalKey(poolId uint64) []byte { + return util.GetByteKey(poolId) +} + +// FinalizedBundleKey ... +func FinalizedBundleKey(poolId uint64, id uint64) []byte { + return util.GetByteKey(poolId, id) +} + +// FinalizedBundleByHeightKey ... +func FinalizedBundleByHeightKey(poolId uint64, height uint64) []byte { + return util.GetByteKey(poolId, height) +} diff --git a/x/bundles/types/message_claim_uploader_role.go b/x/bundles/types/message_claim_uploader_role.go new file mode 100644 index 00000000..0ea543e5 --- /dev/null +++ b/x/bundles/types/message_claim_uploader_role.go @@ -0,0 +1,47 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgClaimUploaderRole = "claim_uploader_role" + +var _ sdk.Msg = &MsgClaimUploaderRole{} + +func NewMsgClaimUploaderRole(creator string, staker string, poolId uint64) *MsgClaimUploaderRole { + return &MsgClaimUploaderRole{ + Creator: creator, + Staker: staker, + PoolId: poolId, + } +} + +func (msg *MsgClaimUploaderRole) Route() string { + return RouterKey +} + +func (msg *MsgClaimUploaderRole) Type() string { + return TypeMsgClaimUploaderRole +} + +func (msg *MsgClaimUploaderRole) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgClaimUploaderRole) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgClaimUploaderRole) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/bundles/types/message_skip_uploader_role.go b/x/bundles/types/message_skip_uploader_role.go new file mode 100644 index 00000000..c051a266 --- /dev/null +++ b/x/bundles/types/message_skip_uploader_role.go @@ -0,0 +1,48 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgSkipUploaderRole = "skip_uploader_role" + +var _ sdk.Msg = &MsgSkipUploaderRole{} + +func NewMsgSkipUploaderRole(creator string, staker string, poolId uint64, fromIndex uint64) *MsgSkipUploaderRole { + return &MsgSkipUploaderRole{ + Creator: creator, + Staker: staker, + PoolId: poolId, + FromIndex: fromIndex, + } +} + +func (msg *MsgSkipUploaderRole) Route() string { + return RouterKey +} + +func (msg *MsgSkipUploaderRole) Type() string { + return TypeMsgSkipUploaderRole +} + +func 
(msg *MsgSkipUploaderRole) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgSkipUploaderRole) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgSkipUploaderRole) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/bundles/types/message_submit_bundle_proposal.go b/x/bundles/types/message_submit_bundle_proposal.go new file mode 100644 index 00000000..595a5a62 --- /dev/null +++ b/x/bundles/types/message_submit_bundle_proposal.go @@ -0,0 +1,55 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgSubmitBundleProposal = "submit_bundle_proposal" + +var _ sdk.Msg = &MsgSubmitBundleProposal{} + +func NewMsgSubmitBundleProposal(creator string, staker string, poolId uint64, storageId string, dataSize uint64, dataHash string, fromIndex uint64, bundleSize uint64, fromKey string, toKey string, bundleSummary string) *MsgSubmitBundleProposal { + return &MsgSubmitBundleProposal{ + Creator: creator, + Staker: staker, + PoolId: poolId, + StorageId: storageId, + DataSize: dataSize, + FromIndex: fromIndex, + BundleSize: bundleSize, + FromKey: fromKey, + ToKey: toKey, + BundleSummary: bundleSummary, + DataHash: dataHash, + } +} + +func (msg *MsgSubmitBundleProposal) Route() string { + return RouterKey +} + +func (msg *MsgSubmitBundleProposal) Type() string { + return TypeMsgSubmitBundleProposal +} + +func (msg *MsgSubmitBundleProposal) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgSubmitBundleProposal) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgSubmitBundleProposal) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/bundles/types/message_vote_bundle_proposal.go b/x/bundles/types/message_vote_bundle_proposal.go new file mode 100644 index 00000000..9ae2f194 --- /dev/null +++ b/x/bundles/types/message_vote_bundle_proposal.go @@ -0,0 +1,49 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgVoteBundleProposal = "vote_bundle_proposal" + +var _ sdk.Msg = &MsgVoteBundleProposal{} + +func NewMsgVoteBundleProposal(creator string, staker string, poolId uint64, storageId string, vote VoteType) *MsgVoteBundleProposal { + return &MsgVoteBundleProposal{ + Creator: creator, + Staker: staker, + PoolId: poolId, + StorageId: storageId, + Vote: vote, + } +} + +func (msg *MsgVoteBundleProposal) Route() string { + return RouterKey +} + +func (msg *MsgVoteBundleProposal) Type() string { + return TypeMsgVoteBundleProposal +} + +func (msg *MsgVoteBundleProposal) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgVoteBundleProposal) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return 
sdk.MustSortJSON(bz) +} + +func (msg *MsgVoteBundleProposal) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/bundles/types/msgs.go b/x/bundles/types/msgs.go new file mode 100644 index 00000000..e568d11e --- /dev/null +++ b/x/bundles/types/msgs.go @@ -0,0 +1,35 @@ +package types + +import ( + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var _ sdk.Msg = &MsgUpdateParams{} + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (msg *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + params := DefaultParams() + if err := json.Unmarshal([]byte(msg.Payload), &params); err != nil { + return err + } + + if err := params.Validate(); err != nil { + return err + } + + return nil +} diff --git a/x/bundles/types/params.go b/x/bundles/types/params.go new file mode 100644 index 00000000..4e8a078f --- /dev/null +++ b/x/bundles/types/params.go @@ -0,0 +1,63 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +// DefaultUploadTimeout ... +var DefaultUploadTimeout = uint64(600) + +// DefaultStorageCost ... +var DefaultStorageCost = uint64(100000) + +// DefaultNetworkFee ... +var DefaultNetworkFee = "0.01" + +// DefaultMaxPoints ... +var DefaultMaxPoints = uint64(5) + +// NewParams creates a new Params instance +func NewParams( + uploadTimeout uint64, + storageCost uint64, + networkFee string, + maxPoints uint64, +) Params { + return Params{ + UploadTimeout: uploadTimeout, + StorageCost: storageCost, + NetworkFee: networkFee, + MaxPoints: maxPoints, + } +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams( + DefaultUploadTimeout, + DefaultStorageCost, + DefaultNetworkFee, + DefaultMaxPoints, + ) +} + +// Validate validates the set of params +func (p Params) Validate() error { + if err := util.ValidateUint64(p.UploadTimeout); err != nil { + return err + } + + if err := util.ValidateUint64(p.StorageCost); err != nil { + return err + } + + if err := util.ValidatePercentage(p.NetworkFee); err != nil { + return err + } + + if err := util.ValidateUint64(p.MaxPoints); err != nil { + return err + } + + return nil +} diff --git a/x/bundles/types/params.pb.go b/x/bundles/types/params.pb.go new file mode 100644 index 00000000..4e323a0a --- /dev/null +++ b/x/bundles/types/params.pb.go @@ -0,0 +1,430 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/bundles/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the bundles module parameters. +type Params struct { + // upload_timeout ... + UploadTimeout uint64 `protobuf:"varint,1,opt,name=upload_timeout,json=uploadTimeout,proto3" json:"upload_timeout,omitempty"` + // storage_cost ... + StorageCost uint64 `protobuf:"varint,2,opt,name=storage_cost,json=storageCost,proto3" json:"storage_cost,omitempty"` + // network_fee ... + NetworkFee string `protobuf:"bytes,3,opt,name=network_fee,json=networkFee,proto3" json:"network_fee,omitempty"` + // max_points ... + MaxPoints uint64 `protobuf:"varint,4,opt,name=max_points,json=maxPoints,proto3" json:"max_points,omitempty"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_cfd3a74b72a01aaa, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetUploadTimeout() uint64 { + if m != nil { + return m.UploadTimeout + } + return 0 +} + +func (m *Params) GetStorageCost() uint64 { + if m != nil { + return m.StorageCost + } + return 0 +} + +func (m *Params) GetNetworkFee() string { + if m != nil { + return m.NetworkFee + } + return "" +} + +func (m *Params) GetMaxPoints() uint64 { + if m != nil { + return m.MaxPoints + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "kyve.bundles.v1beta1.Params") +} + +func init() { proto.RegisterFile("kyve/bundles/v1beta1/params.proto", fileDescriptor_cfd3a74b72a01aaa) } + +var fileDescriptor_cfd3a74b72a01aaa = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcc, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0x2a, 0xcd, 0x4b, 0xc9, 0x49, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x01, + 0x29, 0xd1, 0x83, 0x2a, 0xd1, 0x83, 0x2a, 0x51, 0x9a, 0xc4, 0xc8, 0xc5, 0x16, 0x00, 0x56, 0x26, + 0xa4, 0xca, 0xc5, 0x57, 0x5a, 0x90, 0x93, 0x9f, 0x98, 0x12, 0x5f, 0x92, 0x99, 0x9b, 0x9a, 0x5f, + 0x5a, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x12, 0xc4, 0x0b, 0x11, 0x0d, 0x81, 0x08, 0x0a, 0x29, + 0x72, 0xf1, 0x14, 0x97, 0xe4, 0x17, 0x25, 0xa6, 0xa7, 0xc6, 0x27, 0xe7, 0x17, 0x97, 0x48, 0x30, + 0x81, 0x15, 0x71, 0x43, 0xc5, 0x9c, 0xf3, 0x8b, 0x4b, 0x84, 0xe4, 0xb9, 0xb8, 0xf3, 0x52, 0x4b, + 0xca, 0xf3, 0x8b, 0xb2, 0xe3, 0xd3, 0x52, 0x53, 0x25, 0x98, 0x15, 0x18, 0x35, 0x38, 0x83, 0xb8, + 0xa0, 0x42, 0x6e, 0xa9, 0xa9, 0x42, 0xb2, 0x5c, 0x5c, 0xb9, 0x89, 0x15, 0xf1, 0x05, 0xf9, 0x99, + 0x79, 0x25, 0xc5, 0x12, 0x2c, 0x60, 0x13, 0x38, 0x73, 0x13, 0x2b, 0x02, 0xc0, 0x02, 0x4e, 0x6e, + 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, + 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 
0x2c, 0xc7, 0x10, 0xa5, 0x93, 0x9e, 0x59, 0x92, 0x51, + 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0xef, 0x1d, 0x19, 0xe6, 0xea, 0x07, 0x31, 0x53, 0x3f, 0x39, + 0x23, 0x31, 0x33, 0x4f, 0xbf, 0x02, 0x1e, 0x02, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x49, 0x6c, 0x60, + 0x9f, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfa, 0x0a, 0x60, 0x9b, 0x1e, 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxPoints != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.MaxPoints)) + i-- + dAtA[i] = 0x20 + } + if len(m.NetworkFee) > 0 { + i -= len(m.NetworkFee) + copy(dAtA[i:], m.NetworkFee) + i = encodeVarintParams(dAtA, i, uint64(len(m.NetworkFee))) + i-- + dAtA[i] = 0x1a + } + if m.StorageCost != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.StorageCost)) + i-- + dAtA[i] = 0x10 + } + if m.UploadTimeout != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.UploadTimeout)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UploadTimeout != 0 { + n += 1 + sovParams(uint64(m.UploadTimeout)) + } + if m.StorageCost != 0 { + n += 1 + sovParams(uint64(m.StorageCost)) + } + l = len(m.NetworkFee) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + if m.MaxPoints != 0 { + n += 1 + sovParams(uint64(m.MaxPoints)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadTimeout", wireType) + } + m.UploadTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadTimeout |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageCost", wireType) + } + m.StorageCost = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageCost |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkFee", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkFee = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPoints", wireType) + } + m.MaxPoints = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxPoints |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/query.pb.go b/x/bundles/types/query.pb.go new file mode 100644 index 00000000..f0dcd232 --- /dev/null +++ b/x/bundles/types/query.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kyve/bundles/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_417b774a70d5f5fd, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_417b774a70d5f5fd, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "kyve.bundles.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "kyve.bundles.v1beta1.QueryParamsResponse") +} + +func init() { proto.RegisterFile("kyve/bundles/v1beta1/query.proto", fileDescriptor_417b774a70d5f5fd) } + +var fileDescriptor_417b774a70d5f5fd = []byte{ + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc8, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0x2a, 0xcd, 0x4b, 0xc9, 0x49, 0x2d, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x01, 0xa9, + 0xd0, 0x83, 0xaa, 0xd0, 0x83, 0xaa, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd0, 0x07, + 0xb1, 0x20, 0x6a, 0xa5, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, + 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0xa1, 0xb2, 0x8a, 0x58, 0xed, + 0x2a, 0x48, 0x2c, 0x4a, 0xcc, 0x85, 0x2a, 0x51, 0x12, 0xe1, 0x12, 0x0a, 0x04, 0xd9, 0x1d, 0x00, + 0x16, 0x0c, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x51, 0x0a, 0xe4, 0x12, 0x46, 0x11, 0x2d, 0x2e, + 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe2, 0x62, 0x83, 0x68, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, + 0x36, 0x92, 0xd1, 0xc3, 0xe6, 0x54, 0x3d, 0x88, 0x2e, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, + 0xa0, 0x3a, 0x8c, 0x7a, 0x19, 0xb9, 0x58, 0xc1, 0x66, 0x0a, 0x35, 0x33, 0x72, 0xb1, 0x41, 0x94, + 0x08, 0x69, 0x60, 0x37, 0x00, 0xd3, 0x45, 0x52, 0x9a, 0x44, 0xa8, 0x84, 0xb8, 0x52, 0x49, 0xa5, + 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x72, 0x42, 0x32, 0xfa, 0x78, 0xbc, 0xef, 0xe4, 0x76, 0xe2, 0x91, + 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, + 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x3a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, + 0xc9, 0xf9, 0xb9, 0xfa, 0xde, 0x91, 0x61, 0xae, 0x7e, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, 0xfa, + 0xc9, 0x19, 0x89, 0x99, 0x79, 0xfa, 0x15, 0x70, 0x03, 0x4b, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, + 0xc0, 0xe1, 0x68, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x3e, 0x95, 0xfc, 0xd8, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.bundles.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/bundles/v1beta1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/query.pb.gw.go b/x/bundles/types/query.pb.gw.go new file mode 100644 index 00000000..f49983e5 --- /dev/null +++ b/x/bundles/types/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/bundles/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "bundles", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/x/bundles/types/tx.pb.go b/x/bundles/types/tx.pb.go new file mode 100644 index 00000000..65e519aa --- /dev/null +++ b/x/bundles/types/tx.pb.go @@ -0,0 +1,2816 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/bundles/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// VoteType ... +type VoteType int32 + +const ( + // VOTE_TYPE_UNSPECIFIED ... 
+ VOTE_TYPE_UNSPECIFIED VoteType = 0 + // VOTE_TYPE_VALID ... + VOTE_TYPE_VALID VoteType = 1 + // VOTE_TYPE_INVALID ... + VOTE_TYPE_INVALID VoteType = 2 + // VOTE_TYPE_ABSTAIN ... + VOTE_TYPE_ABSTAIN VoteType = 3 +) + +var VoteType_name = map[int32]string{ + 0: "VOTE_TYPE_UNSPECIFIED", + 1: "VOTE_TYPE_VALID", + 2: "VOTE_TYPE_INVALID", + 3: "VOTE_TYPE_ABSTAIN", +} + +var VoteType_value = map[string]int32{ + "VOTE_TYPE_UNSPECIFIED": 0, + "VOTE_TYPE_VALID": 1, + "VOTE_TYPE_INVALID": 2, + "VOTE_TYPE_ABSTAIN": 3, +} + +func (x VoteType) String() string { + return proto.EnumName(VoteType_name, int32(x)) +} + +func (VoteType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{0} +} + +// MsgSubmitBundleProposal defines a SDK message for submitting a bundle proposal. +type MsgSubmitBundleProposal struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // pool_id ... + PoolId uint64 `protobuf:"varint,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // storage_id ... + StorageId string `protobuf:"bytes,4,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // data_size ... + DataSize uint64 `protobuf:"varint,5,opt,name=data_size,json=dataSize,proto3" json:"data_size,omitempty"` + // data_hash ... + DataHash string `protobuf:"bytes,6,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // from_index ... + FromIndex uint64 `protobuf:"varint,7,opt,name=from_index,json=fromIndex,proto3" json:"from_index,omitempty"` + // bundle_size ... + BundleSize uint64 `protobuf:"varint,8,opt,name=bundle_size,json=bundleSize,proto3" json:"bundle_size,omitempty"` + // from_key + FromKey string `protobuf:"bytes,9,opt,name=from_key,json=fromKey,proto3" json:"from_key,omitempty"` + // to_key ... + ToKey string `protobuf:"bytes,10,opt,name=to_key,json=toKey,proto3" json:"to_key,omitempty"` + // bundle_summary ... 
+ BundleSummary string `protobuf:"bytes,11,opt,name=bundle_summary,json=bundleSummary,proto3" json:"bundle_summary,omitempty"` +} + +func (m *MsgSubmitBundleProposal) Reset() { *m = MsgSubmitBundleProposal{} } +func (m *MsgSubmitBundleProposal) String() string { return proto.CompactTextString(m) } +func (*MsgSubmitBundleProposal) ProtoMessage() {} +func (*MsgSubmitBundleProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{0} +} +func (m *MsgSubmitBundleProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSubmitBundleProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSubmitBundleProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSubmitBundleProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSubmitBundleProposal.Merge(m, src) +} +func (m *MsgSubmitBundleProposal) XXX_Size() int { + return m.Size() +} +func (m *MsgSubmitBundleProposal) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSubmitBundleProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSubmitBundleProposal proto.InternalMessageInfo + +func (m *MsgSubmitBundleProposal) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *MsgSubmitBundleProposal) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetDataSize() uint64 { + if m != nil { + return m.DataSize + } + return 0 +} + +func (m *MsgSubmitBundleProposal) GetDataHash() string { + if m != nil { + return m.DataHash + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetFromIndex() uint64 { + if m != nil { + return m.FromIndex + } + return 0 +} + +func (m *MsgSubmitBundleProposal) GetBundleSize() uint64 { + if m != nil { + return m.BundleSize + } + return 0 +} + +func (m *MsgSubmitBundleProposal) GetFromKey() string { + if m != nil { + return m.FromKey + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetToKey() string { + if m != nil { + return m.ToKey + } + return "" +} + +func (m *MsgSubmitBundleProposal) GetBundleSummary() string { + if m != nil { + return m.BundleSummary + } + return "" +} + +// MsgSubmitBundleProposalResponse defines the Msg/SubmitBundleProposal response type. 
+type MsgSubmitBundleProposalResponse struct { +} + +func (m *MsgSubmitBundleProposalResponse) Reset() { *m = MsgSubmitBundleProposalResponse{} } +func (m *MsgSubmitBundleProposalResponse) String() string { return proto.CompactTextString(m) } +func (*MsgSubmitBundleProposalResponse) ProtoMessage() {} +func (*MsgSubmitBundleProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{1} +} +func (m *MsgSubmitBundleProposalResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSubmitBundleProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSubmitBundleProposalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSubmitBundleProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSubmitBundleProposalResponse.Merge(m, src) +} +func (m *MsgSubmitBundleProposalResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgSubmitBundleProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSubmitBundleProposalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSubmitBundleProposalResponse proto.InternalMessageInfo + +// MsgVoteBundleProposal defines a SDK message for voting on a bundle proposal. +type MsgVoteBundleProposal struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // id ... + PoolId uint64 `protobuf:"varint,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // storage_id ... + StorageId string `protobuf:"bytes,4,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` + // vote ... 
+ Vote VoteType `protobuf:"varint,5,opt,name=vote,proto3,enum=kyve.bundles.v1beta1.VoteType" json:"vote,omitempty"` +} + +func (m *MsgVoteBundleProposal) Reset() { *m = MsgVoteBundleProposal{} } +func (m *MsgVoteBundleProposal) String() string { return proto.CompactTextString(m) } +func (*MsgVoteBundleProposal) ProtoMessage() {} +func (*MsgVoteBundleProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{2} +} +func (m *MsgVoteBundleProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgVoteBundleProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgVoteBundleProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgVoteBundleProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgVoteBundleProposal.Merge(m, src) +} +func (m *MsgVoteBundleProposal) XXX_Size() int { + return m.Size() +} +func (m *MsgVoteBundleProposal) XXX_DiscardUnknown() { + xxx_messageInfo_MsgVoteBundleProposal.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgVoteBundleProposal proto.InternalMessageInfo + +func (m *MsgVoteBundleProposal) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgVoteBundleProposal) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgVoteBundleProposal) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *MsgVoteBundleProposal) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +func (m *MsgVoteBundleProposal) GetVote() VoteType { + if m != nil { + return m.Vote + } + return VOTE_TYPE_UNSPECIFIED +} + +// MsgVoteBundleProposalResponse defines the Msg/VoteBundleProposal response type. +type MsgVoteBundleProposalResponse struct { +} + +func (m *MsgVoteBundleProposalResponse) Reset() { *m = MsgVoteBundleProposalResponse{} } +func (m *MsgVoteBundleProposalResponse) String() string { return proto.CompactTextString(m) } +func (*MsgVoteBundleProposalResponse) ProtoMessage() {} +func (*MsgVoteBundleProposalResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{3} +} +func (m *MsgVoteBundleProposalResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgVoteBundleProposalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgVoteBundleProposalResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgVoteBundleProposalResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgVoteBundleProposalResponse.Merge(m, src) +} +func (m *MsgVoteBundleProposalResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgVoteBundleProposalResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgVoteBundleProposalResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgVoteBundleProposalResponse proto.InternalMessageInfo + +// MsgClaimUploaderRole defines a SDK message for claiming the uploader role. +type MsgClaimUploaderRole struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // id ... 
+ PoolId uint64 `protobuf:"varint,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *MsgClaimUploaderRole) Reset() { *m = MsgClaimUploaderRole{} } +func (m *MsgClaimUploaderRole) String() string { return proto.CompactTextString(m) } +func (*MsgClaimUploaderRole) ProtoMessage() {} +func (*MsgClaimUploaderRole) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{4} +} +func (m *MsgClaimUploaderRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimUploaderRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimUploaderRole.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimUploaderRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimUploaderRole.Merge(m, src) +} +func (m *MsgClaimUploaderRole) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimUploaderRole) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimUploaderRole.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimUploaderRole proto.InternalMessageInfo + +func (m *MsgClaimUploaderRole) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgClaimUploaderRole) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgClaimUploaderRole) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +// MsgClaimUploaderRoleResponse defines the Msg/ClaimUploaderRole response type. +type MsgClaimUploaderRoleResponse struct { +} + +func (m *MsgClaimUploaderRoleResponse) Reset() { *m = MsgClaimUploaderRoleResponse{} } +func (m *MsgClaimUploaderRoleResponse) String() string { return proto.CompactTextString(m) } +func (*MsgClaimUploaderRoleResponse) ProtoMessage() {} +func (*MsgClaimUploaderRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{5} +} +func (m *MsgClaimUploaderRoleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimUploaderRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimUploaderRoleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimUploaderRoleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimUploaderRoleResponse.Merge(m, src) +} +func (m *MsgClaimUploaderRoleResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimUploaderRoleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimUploaderRoleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimUploaderRoleResponse proto.InternalMessageInfo + +// MsgSubmitBundleProposal defines a SDK message for submitting a bundle proposal. +type MsgSkipUploaderRole struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // pool_id ... + PoolId uint64 `protobuf:"varint,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // from_index ... 
+ FromIndex uint64 `protobuf:"varint,4,opt,name=from_index,json=fromIndex,proto3" json:"from_index,omitempty"` +} + +func (m *MsgSkipUploaderRole) Reset() { *m = MsgSkipUploaderRole{} } +func (m *MsgSkipUploaderRole) String() string { return proto.CompactTextString(m) } +func (*MsgSkipUploaderRole) ProtoMessage() {} +func (*MsgSkipUploaderRole) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{6} +} +func (m *MsgSkipUploaderRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSkipUploaderRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSkipUploaderRole.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSkipUploaderRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSkipUploaderRole.Merge(m, src) +} +func (m *MsgSkipUploaderRole) XXX_Size() int { + return m.Size() +} +func (m *MsgSkipUploaderRole) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSkipUploaderRole.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSkipUploaderRole proto.InternalMessageInfo + +func (m *MsgSkipUploaderRole) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgSkipUploaderRole) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgSkipUploaderRole) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *MsgSkipUploaderRole) GetFromIndex() uint64 { + if m != nil { + return m.FromIndex + } + return 0 +} + +// MsgSubmitBundleProposalResponse defines the Msg/SubmitBundleProposal response type. +type MsgSkipUploaderRoleResponse struct { +} + +func (m *MsgSkipUploaderRoleResponse) Reset() { *m = MsgSkipUploaderRoleResponse{} } +func (m *MsgSkipUploaderRoleResponse) String() string { return proto.CompactTextString(m) } +func (*MsgSkipUploaderRoleResponse) ProtoMessage() {} +func (*MsgSkipUploaderRoleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{7} +} +func (m *MsgSkipUploaderRoleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSkipUploaderRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSkipUploaderRoleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSkipUploaderRoleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSkipUploaderRoleResponse.Merge(m, src) +} +func (m *MsgSkipUploaderRoleResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgSkipUploaderRoleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSkipUploaderRoleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSkipUploaderRoleResponse proto.InternalMessageInfo + +// MsgUpdateParams defines a SDK message for updating the module parameters. +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // payload defines the x/bundles parameters to update. 
+ Payload string `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{8} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9ed52bfae1633bf9, []int{9} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("kyve.bundles.v1beta1.VoteType", VoteType_name, VoteType_value) + proto.RegisterType((*MsgSubmitBundleProposal)(nil), "kyve.bundles.v1beta1.MsgSubmitBundleProposal") + proto.RegisterType((*MsgSubmitBundleProposalResponse)(nil), "kyve.bundles.v1beta1.MsgSubmitBundleProposalResponse") + proto.RegisterType((*MsgVoteBundleProposal)(nil), "kyve.bundles.v1beta1.MsgVoteBundleProposal") + proto.RegisterType((*MsgVoteBundleProposalResponse)(nil), "kyve.bundles.v1beta1.MsgVoteBundleProposalResponse") + proto.RegisterType((*MsgClaimUploaderRole)(nil), "kyve.bundles.v1beta1.MsgClaimUploaderRole") + proto.RegisterType((*MsgClaimUploaderRoleResponse)(nil), "kyve.bundles.v1beta1.MsgClaimUploaderRoleResponse") + proto.RegisterType((*MsgSkipUploaderRole)(nil), "kyve.bundles.v1beta1.MsgSkipUploaderRole") + proto.RegisterType((*MsgSkipUploaderRoleResponse)(nil), 
"kyve.bundles.v1beta1.MsgSkipUploaderRoleResponse") + proto.RegisterType((*MsgUpdateParams)(nil), "kyve.bundles.v1beta1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "kyve.bundles.v1beta1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("kyve/bundles/v1beta1/tx.proto", fileDescriptor_9ed52bfae1633bf9) } + +var fileDescriptor_9ed52bfae1633bf9 = []byte{ + // 728 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x55, 0x5f, 0x4f, 0xda, 0x5e, + 0x18, 0xa6, 0x8a, 0x08, 0xaf, 0xbf, 0x9f, 0x62, 0x85, 0x59, 0xea, 0xa8, 0x8e, 0xc4, 0xc4, 0xb9, + 0x09, 0x11, 0xb3, 0xdd, 0x83, 0x62, 0xd6, 0x38, 0x18, 0x01, 0x24, 0x71, 0x37, 0xe4, 0x40, 0xcf, + 0x4a, 0x03, 0xe5, 0x34, 0x3d, 0x07, 0x66, 0xcd, 0x92, 0xdd, 0xee, 0x72, 0xdf, 0x61, 0x5f, 0x61, + 0xfb, 0x0e, 0x5e, 0x9a, 0x5d, 0x2c, 0xbb, 0x5c, 0xf4, 0x8b, 0x2c, 0x3d, 0x85, 0xfa, 0x07, 0x48, + 0x34, 0xd9, 0xb2, 0x3b, 0xde, 0xe7, 0x79, 0xde, 0xbf, 0xbc, 0x6f, 0x0f, 0x24, 0x3b, 0xce, 0x00, + 0x67, 0x9a, 0xfd, 0x9e, 0xd6, 0xc5, 0x34, 0x33, 0xd8, 0x6d, 0x62, 0x86, 0x76, 0x33, 0xec, 0x34, + 0x6d, 0xd9, 0x84, 0x11, 0x31, 0xe6, 0xd2, 0xe9, 0x21, 0x9d, 0x1e, 0xd2, 0x72, 0xa2, 0x45, 0xa8, + 0x49, 0x68, 0x83, 0x6b, 0x32, 0x9e, 0xe1, 0x39, 0xc8, 0x31, 0x9d, 0xe8, 0xc4, 0xc3, 0xdd, 0x5f, + 0x1e, 0x9a, 0xfa, 0x31, 0x03, 0xab, 0x45, 0xaa, 0x57, 0xfb, 0x4d, 0xd3, 0x60, 0x79, 0x1e, 0xad, + 0x6c, 0x13, 0x8b, 0x50, 0xd4, 0x15, 0x25, 0x98, 0x6f, 0xd9, 0x18, 0x31, 0x62, 0x4b, 0xc2, 0x86, + 0xb0, 0x15, 0xa9, 0x8c, 0x4c, 0xf1, 0x11, 0x84, 0x28, 0x43, 0x1d, 0x6c, 0x4b, 0x33, 0x9c, 0x18, + 0x5a, 0xe2, 0x2a, 0xcc, 0x5b, 0x84, 0x74, 0x1b, 0x86, 0x26, 0xcd, 0x6e, 0x08, 0x5b, 0xc1, 0x4a, + 0xc8, 0x35, 0x55, 0x4d, 0x4c, 0x02, 0x50, 0x46, 0x6c, 0xa4, 0x63, 0x97, 0x0b, 0x72, 0xa7, 0xc8, + 0x10, 0x51, 0x35, 0x71, 0x0d, 0x22, 0x1a, 0x62, 0xa8, 0x41, 0x8d, 0x33, 0x2c, 0xcd, 0x71, 0xcf, + 0xb0, 0x0b, 0x54, 0x8d, 0x33, 0xec, 0x93, 0x6d, 0x44, 0xdb, 0x52, 0x88, 0xbb, 0x72, 0xf2, 0x15, + 0xa2, 0x6d, 0x37, 0xf0, 0x3b, 0x9b, 0x98, 0x0d, 0xa3, 0xa7, 0xe1, 0x53, 0x69, 0x9e, 0xbb, 0x46, + 0x5c, 0x44, 0x75, 0x01, 0x71, 0x1d, 0x16, 0xbc, 0x11, 0x79, 0xa1, 0xc3, 0x9c, 0x07, 0x0f, 0xe2, + 0xc1, 0x13, 0x10, 0xe6, 0xfe, 0x1d, 0xec, 0x48, 0x11, 0xaf, 0x49, 0xd7, 0x3e, 0xc2, 0x8e, 0x18, + 0x87, 0x10, 0x23, 0x9c, 0x00, 0x4e, 0xcc, 0x31, 0xe2, 0xc2, 0x9b, 0xb0, 0x38, 0x0a, 0xd9, 0x37, + 0x4d, 0x64, 0x3b, 0xd2, 0x02, 0xa7, 0xff, 0x1f, 0x46, 0xf5, 0xc0, 0xd4, 0x13, 0x58, 0x9f, 0x32, + 0xd7, 0x0a, 0xa6, 0x16, 0xe9, 0x51, 0x9c, 0xfa, 0x26, 0x40, 0xbc, 0x48, 0xf5, 0x3a, 0x61, 0xf8, + 0x9f, 0x4d, 0x3e, 0x0b, 0xc1, 0x01, 0x61, 0xde, 0xd0, 0x17, 0xb3, 0x4a, 0x7a, 0xd2, 0x56, 0xa5, + 0xdd, 0x0a, 0x6b, 0x8e, 0x85, 0x2b, 0x5c, 0x9b, 0x5a, 0x87, 0xe4, 0xc4, 0xb2, 0xfd, 0xc6, 0x10, + 0xc4, 0x8a, 0x54, 0xdf, 0xef, 0x22, 0xc3, 0x3c, 0xb6, 0xba, 0x04, 0x69, 0xd8, 0xae, 0x90, 0x2e, + 0xfe, 0x83, 0x6d, 0xa5, 0x14, 0x78, 0x3c, 0x29, 0x85, 0x5f, 0xc2, 0x47, 0x58, 0x71, 0xc7, 0xdf, + 0x31, 0xac, 0xbf, 0x54, 0xc1, 0x9d, 0xcd, 0x0b, 0xde, 0xd9, 0xbc, 0x54, 0x12, 0xd6, 0x26, 0x14, + 0xe0, 0xd7, 0xd7, 0x82, 0xa5, 0x22, 0xd5, 0x8f, 0x2d, 0x0d, 0x31, 0x5c, 0x46, 0x36, 0x32, 0xa9, + 0xf8, 0x12, 0x22, 0xa8, 0xcf, 0xda, 0xc4, 0x36, 0x98, 0xe3, 0x55, 0x97, 0x97, 0xbe, 0x7f, 0xdd, + 0x89, 0x0d, 0xaf, 0x38, 0xa7, 0x69, 0x36, 0xa6, 0xb4, 0xca, 0x6c, 0xa3, 0xa7, 0x57, 0xae, 0xa5, + 0x6e, 0x4f, 0x16, 0x72, 0xdc, 0x1c, 0xc3, 0xd2, 0x47, 0x66, 0x2a, 0xc1, 0x6f, 0xfb, 0x66, 0x92, + 0x51, 0xfe, 0xed, 0x1e, 0x84, 0x47, 0xff, 0xaa, 0x98, 0x80, 0x78, 
0xfd, 0x4d, 0xad, 0xd0, 0xa8, + 0x9d, 0x94, 0x0b, 0x8d, 0xe3, 0x52, 0xb5, 0x5c, 0xd8, 0x57, 0x0f, 0xd5, 0xc2, 0x41, 0x34, 0x20, + 0xae, 0xc0, 0xd2, 0x35, 0x55, 0xcf, 0xbd, 0x56, 0x0f, 0xa2, 0x82, 0x18, 0x87, 0xe5, 0x6b, 0x50, + 0x2d, 0x79, 0xf0, 0xcc, 0x6d, 0x38, 0x97, 0xaf, 0xd6, 0x72, 0x6a, 0x29, 0x3a, 0x2b, 0x07, 0x3f, + 0x7d, 0x51, 0x02, 0xd9, 0xf3, 0x20, 0xcc, 0x16, 0xa9, 0x2e, 0x7e, 0x80, 0xd8, 0xc4, 0x6f, 0xcd, + 0xce, 0xe4, 0xcd, 0x9b, 0x72, 0x42, 0xf2, 0x8b, 0x07, 0xc9, 0x47, 0x5d, 0x8b, 0x03, 0x10, 0x27, + 0x5c, 0xdb, 0xb3, 0xa9, 0xc1, 0xc6, 0xc5, 0xf2, 0xde, 0x03, 0xc4, 0x7e, 0x5e, 0x0a, 0xcb, 0xe3, + 0xd7, 0xb0, 0x3d, 0x35, 0xd2, 0x98, 0x56, 0xce, 0xde, 0x5f, 0xeb, 0x27, 0xb5, 0x20, 0x3a, 0xb6, + 0xff, 0x4f, 0xa7, 0xcf, 0xed, 0x8e, 0x54, 0xde, 0xbd, 0xb7, 0xd4, 0xcf, 0xa8, 0xc1, 0x7f, 0xb7, + 0x36, 0x7a, 0x73, 0x6a, 0x88, 0x9b, 0x32, 0x79, 0xe7, 0x5e, 0xb2, 0x51, 0x96, 0xfc, 0xe1, 0xf9, + 0xa5, 0x22, 0x5c, 0x5c, 0x2a, 0xc2, 0xaf, 0x4b, 0x45, 0xf8, 0x7c, 0xa5, 0x04, 0x2e, 0xae, 0x94, + 0xc0, 0xcf, 0x2b, 0x25, 0xf0, 0xf6, 0xb9, 0x6e, 0xb0, 0x76, 0xbf, 0x99, 0x6e, 0x11, 0x33, 0x73, + 0x74, 0x52, 0x2f, 0x94, 0x30, 0x7b, 0x4f, 0xec, 0x4e, 0xa6, 0xd5, 0x46, 0x46, 0x2f, 0x73, 0xea, + 0x3f, 0xa6, 0xcc, 0xb1, 0x30, 0x6d, 0x86, 0xf8, 0x0b, 0xb8, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xde, 0x89, 0xbd, 0x79, 0x69, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // SubmitBundleProposal ... + SubmitBundleProposal(ctx context.Context, in *MsgSubmitBundleProposal, opts ...grpc.CallOption) (*MsgSubmitBundleProposalResponse, error) + // VoteBundleProposal ... + VoteBundleProposal(ctx context.Context, in *MsgVoteBundleProposal, opts ...grpc.CallOption) (*MsgVoteBundleProposalResponse, error) + // ClaimUploaderRole ... + ClaimUploaderRole(ctx context.Context, in *MsgClaimUploaderRole, opts ...grpc.CallOption) (*MsgClaimUploaderRoleResponse, error) + // SkipUploaderRole ... + SkipUploaderRole(ctx context.Context, in *MsgSkipUploaderRole, opts ...grpc.CallOption) (*MsgSkipUploaderRoleResponse, error) + // UpdateParams defines a governance operation for updating the x/bundles module + // parameters. The authority is hard-coded to the x/gov module account. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) SubmitBundleProposal(ctx context.Context, in *MsgSubmitBundleProposal, opts ...grpc.CallOption) (*MsgSubmitBundleProposalResponse, error) { + out := new(MsgSubmitBundleProposalResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Msg/SubmitBundleProposal", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) VoteBundleProposal(ctx context.Context, in *MsgVoteBundleProposal, opts ...grpc.CallOption) (*MsgVoteBundleProposalResponse, error) { + out := new(MsgVoteBundleProposalResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Msg/VoteBundleProposal", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ClaimUploaderRole(ctx context.Context, in *MsgClaimUploaderRole, opts ...grpc.CallOption) (*MsgClaimUploaderRoleResponse, error) { + out := new(MsgClaimUploaderRoleResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Msg/ClaimUploaderRole", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) SkipUploaderRole(ctx context.Context, in *MsgSkipUploaderRole, opts ...grpc.CallOption) (*MsgSkipUploaderRoleResponse, error) { + out := new(MsgSkipUploaderRoleResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Msg/SkipUploaderRole", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.bundles.v1beta1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // SubmitBundleProposal ... + SubmitBundleProposal(context.Context, *MsgSubmitBundleProposal) (*MsgSubmitBundleProposalResponse, error) + // VoteBundleProposal ... + VoteBundleProposal(context.Context, *MsgVoteBundleProposal) (*MsgVoteBundleProposalResponse, error) + // ClaimUploaderRole ... + ClaimUploaderRole(context.Context, *MsgClaimUploaderRole) (*MsgClaimUploaderRoleResponse, error) + // SkipUploaderRole ... + SkipUploaderRole(context.Context, *MsgSkipUploaderRole) (*MsgSkipUploaderRoleResponse, error) + // UpdateParams defines a governance operation for updating the x/bundles module + // parameters. The authority is hard-coded to the x/gov module account. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
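+// For example (an illustrative sketch, not part of the generated code), a custom
+// Msg server that embeds it keeps compiling even if new RPCs are later added to
+// the Msg service, because the embedded struct supplies default implementations:
+//
+//	type msgServer struct {
+//		UnimplementedMsgServer
+//	}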
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) SubmitBundleProposal(ctx context.Context, req *MsgSubmitBundleProposal) (*MsgSubmitBundleProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SubmitBundleProposal not implemented") +} +func (*UnimplementedMsgServer) VoteBundleProposal(ctx context.Context, req *MsgVoteBundleProposal) (*MsgVoteBundleProposalResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method VoteBundleProposal not implemented") +} +func (*UnimplementedMsgServer) ClaimUploaderRole(ctx context.Context, req *MsgClaimUploaderRole) (*MsgClaimUploaderRoleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClaimUploaderRole not implemented") +} +func (*UnimplementedMsgServer) SkipUploaderRole(ctx context.Context, req *MsgSkipUploaderRole) (*MsgSkipUploaderRoleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SkipUploaderRole not implemented") +} +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_SubmitBundleProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgSubmitBundleProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).SubmitBundleProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Msg/SubmitBundleProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).SubmitBundleProposal(ctx, req.(*MsgSubmitBundleProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_VoteBundleProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgVoteBundleProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).VoteBundleProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Msg/VoteBundleProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).VoteBundleProposal(ctx, req.(*MsgVoteBundleProposal)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ClaimUploaderRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgClaimUploaderRole) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ClaimUploaderRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Msg/ClaimUploaderRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ClaimUploaderRole(ctx, req.(*MsgClaimUploaderRole)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_SkipUploaderRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgSkipUploaderRole) + if err := dec(in); 
err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).SkipUploaderRole(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Msg/SkipUploaderRole", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).SkipUploaderRole(ctx, req.(*MsgSkipUploaderRole)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.bundles.v1beta1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.bundles.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SubmitBundleProposal", + Handler: _Msg_SubmitBundleProposal_Handler, + }, + { + MethodName: "VoteBundleProposal", + Handler: _Msg_VoteBundleProposal_Handler, + }, + { + MethodName: "ClaimUploaderRole", + Handler: _Msg_ClaimUploaderRole_Handler, + }, + { + MethodName: "SkipUploaderRole", + Handler: _Msg_SkipUploaderRole_Handler, + }, + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/bundles/v1beta1/tx.proto", +} + +func (m *MsgSubmitBundleProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSubmitBundleProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSubmitBundleProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BundleSummary) > 0 { + i -= len(m.BundleSummary) + copy(dAtA[i:], m.BundleSummary) + i = encodeVarintTx(dAtA, i, uint64(len(m.BundleSummary))) + i-- + dAtA[i] = 0x5a + } + if len(m.ToKey) > 0 { + i -= len(m.ToKey) + copy(dAtA[i:], m.ToKey) + i = encodeVarintTx(dAtA, i, uint64(len(m.ToKey))) + i-- + dAtA[i] = 0x52 + } + if len(m.FromKey) > 0 { + i -= len(m.FromKey) + copy(dAtA[i:], m.FromKey) + i = encodeVarintTx(dAtA, i, uint64(len(m.FromKey))) + i-- + dAtA[i] = 0x4a + } + if m.BundleSize != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.BundleSize)) + i-- + dAtA[i] = 0x40 + } + if m.FromIndex != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.FromIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.DataHash) > 0 { + i -= len(m.DataHash) + copy(dAtA[i:], m.DataHash) + i = encodeVarintTx(dAtA, i, uint64(len(m.DataHash))) + i-- + dAtA[i] = 0x32 + } + if m.DataSize != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DataSize)) + i-- + dAtA[i] = 0x28 + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintTx(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x22 + } + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = 
encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgSubmitBundleProposalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSubmitBundleProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSubmitBundleProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgVoteBundleProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgVoteBundleProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgVoteBundleProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Vote != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Vote)) + i-- + dAtA[i] = 0x28 + } + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintTx(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x22 + } + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgVoteBundleProposalResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgVoteBundleProposalResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgVoteBundleProposalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgClaimUploaderRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimUploaderRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimUploaderRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgClaimUploaderRoleResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimUploaderRoleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimUploaderRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgSkipUploaderRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSkipUploaderRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSkipUploaderRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FromIndex != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.FromIndex)) + i-- + dAtA[i] = 0x20 + } + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgSkipUploaderRoleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSkipUploaderRoleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSkipUploaderRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTx(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= 
sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgSubmitBundleProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.DataSize != 0 { + n += 1 + sovTx(uint64(m.DataSize)) + } + l = len(m.DataHash) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.FromIndex != 0 { + n += 1 + sovTx(uint64(m.FromIndex)) + } + if m.BundleSize != 0 { + n += 1 + sovTx(uint64(m.BundleSize)) + } + l = len(m.FromKey) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ToKey) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.BundleSummary) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgSubmitBundleProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgVoteBundleProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Vote != 0 { + n += 1 + sovTx(uint64(m.Vote)) + } + return n +} + +func (m *MsgVoteBundleProposalResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgClaimUploaderRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + return n +} + +func (m *MsgClaimUploaderRoleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgSkipUploaderRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + if m.FromIndex != 0 { + n += 1 + sovTx(uint64(m.FromIndex)) + } + return n +} + +func (m *MsgSkipUploaderRoleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgSubmitBundleProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSubmitBundleProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSubmitBundleProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSize", wireType) + } + m.DataSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIndex", wireType) + } + m.FromIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSize", wireType) + } + m.BundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleSummary", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BundleSummary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSubmitBundleProposalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSubmitBundleProposalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSubmitBundleProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgVoteBundleProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgVoteBundleProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgVoteBundleProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= VoteType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgVoteBundleProposalResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgVoteBundleProposalResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgVoteBundleProposalResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimUploaderRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimUploaderRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimUploaderRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimUploaderRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimUploaderRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimUploaderRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSkipUploaderRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSkipUploaderRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSkipUploaderRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIndex", wireType) + } + m.FromIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSkipUploaderRoleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSkipUploaderRoleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSkipUploaderRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/bundles/types/types.go b/x/bundles/types/types.go new file mode 100644 index 00000000..d26d4556 --- /dev/null +++ b/x/bundles/types/types.go @@ -0,0 +1,25 @@ +package types + +type VoteDistribution struct { + // valid ... + Valid uint64 + // invalid ... + Invalid uint64 + // abstain ... + Abstain uint64 + // total ... + Total uint64 + // status ... + Status BundleStatus +} + +type BundleReward struct { + // treasury ... + Treasury uint64 + // uploader ... + Uploader uint64 + // delegation ... + Delegation uint64 + // total ... + Total uint64 +} diff --git a/x/delegation/client/cli/query.go b/x/delegation/client/cli/query.go new file mode 100644 index 00000000..2ec64253 --- /dev/null +++ b/x/delegation/client/cli/query.go @@ -0,0 +1,32 @@ +package cli + +import ( + "fmt" + // "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + // "github.com/cosmos/cosmos-sdk/client/flags" + // sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/KYVENetwork/chain/x/delegation/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(_ string) *cobra.Command { + // Group delegation queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdQueryParams()) + + // All other queries are found in the queries module + + return cmd +} diff --git a/x/delegation/client/cli/query_params.go b/x/delegation/client/cli/query_params.go new file mode 100644 index 00000000..ea5b0a38 --- /dev/null +++ b/x/delegation/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the delegation module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/delegation/client/cli/tx.go b/x/delegation/client/cli/tx.go new file mode 100644 index 00000000..e944d47a --- /dev/null +++ b/x/delegation/client/cli/tx.go @@ -0,0 +1,28 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + 
+ "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdDelegate()) + cmd.AddCommand(CmdUndelegate()) + cmd.AddCommand(CmdRedelegate()) + cmd.AddCommand(CmdWithdrawRewards()) + + return cmd +} diff --git a/x/delegation/client/cli/tx_delegate.go b/x/delegation/client/cli/tx_delegate.go new file mode 100644 index 00000000..4c1b69a4 --- /dev/null +++ b/x/delegation/client/cli/tx_delegate.go @@ -0,0 +1,45 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdDelegate() *cobra.Command { + cmd := &cobra.Command{ + Use: "delegate [staker] [amount]", + Short: "Delegate the given amount (in nKYVE) to the given staker (address)", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argAmount, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgDelegate{ + Creator: clientCtx.GetFromAddress().String(), + Staker: args[0], + Amount: argAmount, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/delegation/client/cli/tx_redelegate.go b/x/delegation/client/cli/tx_redelegate.go new file mode 100644 index 00000000..2d4cc8de --- /dev/null +++ b/x/delegation/client/cli/tx_redelegate.go @@ -0,0 +1,46 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdRedelegate() *cobra.Command { + cmd := &cobra.Command{ + Use: "redelegate [from_staker] [to_staker] [amount]", + Short: "Redelegate the given amount from one staker to another", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argAmount, err := cast.ToUint64E(args[2]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgRedelegate{ + Creator: clientCtx.GetFromAddress().String(), + FromStaker: args[0], + ToStaker: args[1], + Amount: argAmount, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/delegation/client/cli/tx_undelegate.go b/x/delegation/client/cli/tx_undelegate.go new file mode 100644 index 00000000..160bb6f1 --- /dev/null +++ b/x/delegation/client/cli/tx_undelegate.go @@ -0,0 +1,45 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func 
+func CmdUndelegate() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "undelegate [staker] [amount]",
+		Short: "Start undelegating the given amount from the staker",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			argAmount, err := cast.ToUint64E(args[1])
+			if err != nil {
+				return err
+			}
+
+			clientCtx, err := client.GetClientTxContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			msg := types.MsgUndelegate{
+				Creator: clientCtx.GetFromAddress().String(),
+				Staker:  args[0],
+				Amount:  argAmount,
+			}
+
+			if err := msg.ValidateBasic(); err != nil {
+				return err
+			}
+
+			return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg)
+		},
+	}
+
+	flags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/delegation/client/cli/tx_withdraw_rewards.go b/x/delegation/client/cli/tx_withdraw_rewards.go
new file mode 100644
index 00000000..7809419d
--- /dev/null
+++ b/x/delegation/client/cli/tx_withdraw_rewards.go
@@ -0,0 +1,38 @@
+package cli
+
+import (
+	"github.com/KYVENetwork/chain/x/delegation/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	"github.com/cosmos/cosmos-sdk/client/tx"
+	"github.com/spf13/cobra"
+)
+
+func CmdWithdrawRewards() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "withdraw_rewards [staker]",
+		Short: "Withdraw collected rewards from the given staker",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			clientCtx, err := client.GetClientTxContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			msg := types.MsgWithdrawRewards{
+				Creator: clientCtx.GetFromAddress().String(),
+				Staker:  args[0],
+			}
+
+			if err := msg.ValidateBasic(); err != nil {
+				return err
+			}
+
+			return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg)
+		},
+	}
+
+	flags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/delegation/genesis.go b/x/delegation/genesis.go
new file mode 100644
index 00000000..eaf98673
--- /dev/null
+++ b/x/delegation/genesis.go
@@ -0,0 +1,64 @@
+package delegation
+
+import (
+	"github.com/KYVENetwork/chain/x/delegation/keeper"
+	"github.com/KYVENetwork/chain/x/delegation/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// InitGenesis initializes the delegation module's state from a provided genesis
+// state.
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) {
+	k.SetParams(ctx, genState.Params)
+
+	for _, delegator := range genState.DelegatorList {
+		k.SetDelegator(ctx, delegator)
+	}
+
+	for _, entry := range genState.DelegationEntryList {
+		k.SetDelegationEntry(ctx, entry)
+	}
+
+	for _, entry := range genState.DelegationDataList {
+		k.SetDelegationData(ctx, entry)
+	}
+
+	for _, entry := range genState.DelegationSlashList {
+		k.SetDelegationSlashEntry(ctx, entry)
+	}
+
+	for _, entry := range genState.UndelegationQueueEntryList {
+		k.SetUndelegationQueueEntry(ctx, entry)
+	}
+
+	k.SetQueueState(ctx, genState.QueueStateUndelegation)
+
+	for _, entry := range genState.RedelegationCooldownList {
+		k.SetRedelegationCooldown(ctx, entry)
+	}
+
+	k.InitMemStore(ctx)
+}
+
+// ExportGenesis returns the delegation module's exported genesis.
+func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState {
+	genesis := types.DefaultGenesis()
+
+	genesis.Params = k.GetParams(ctx)
+
+	genesis.DelegatorList = k.GetAllDelegators(ctx)
+
+	genesis.DelegationEntryList = k.GetAllDelegationEntries(ctx)
+
+	genesis.DelegationDataList = k.GetAllDelegationData(ctx)
+
+	genesis.DelegationSlashList = k.GetAllDelegationSlashEntries(ctx)
+
+	genesis.UndelegationQueueEntryList = k.GetAllUnbondingDelegationQueueEntries(ctx)
+
+	genesis.QueueStateUndelegation = k.GetQueueState(ctx)
+
+	genesis.RedelegationCooldownList = k.GetAllRedelegationCooldownEntries(ctx)
+
+	return genesis
+}
diff --git a/x/delegation/keeper/exported_functions.go b/x/delegation/keeper/exported_functions.go
new file mode 100644
index 00000000..a43e304d
--- /dev/null
+++ b/x/delegation/keeper/exported_functions.go
@@ -0,0 +1,98 @@
+package keeper
+
+import (
+	"github.com/KYVENetwork/chain/util"
+	"github.com/KYVENetwork/chain/x/delegation/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// These functions are meant to be called from external modules.
+// For now this is the bundles module, which needs to interact
+// with the delegation module.
+// All these functions are safe in the sense that they do not return errors;
+// every edge case is handled within the function itself.
+
+// GetDelegationAmount returns the sum of all delegations for a specific staker.
+// If the staker does not exist, it returns zero, as the staker then has zero delegations.
+func (k Keeper) GetDelegationAmount(ctx sdk.Context, staker string) uint64 {
+	delegationData, found := k.GetDelegationData(ctx, staker)
+
+	if found {
+		return delegationData.TotalDelegation
+	}
+
+	return 0
+}
+
+// GetDelegationAmountOfDelegator returns the amount of $KYVE that `delegatorAddress`
+// has delegated to `stakerAddress`. If one of the addresses does not exist, it returns zero.
+func (k Keeper) GetDelegationAmountOfDelegator(ctx sdk.Context, stakerAddress string, delegatorAddress string) uint64 {
+	return k.f1GetCurrentDelegation(ctx, stakerAddress, delegatorAddress)
+}
+
+// GetDelegationOfPool returns the total amount of $KYVE that users have delegated
+// to stakers participating in the given pool.
+func (k Keeper) GetDelegationOfPool(ctx sdk.Context, poolId uint64) uint64 {
+	totalDelegation := uint64(0)
+	for _, address := range k.stakersKeeper.GetAllStakerAddressesOfPool(ctx, poolId) {
+		totalDelegation += k.GetDelegationAmount(ctx, address)
+	}
+	return totalDelegation
+}
+
+// PayoutRewards transfers `amount` $nKYVE from the `payerModuleName` module to the delegation module.
+// It then awards these tokens internally to all delegators of the staker `staker`.
+// Delegators receive these rewards by calling the `withdraw` transaction.
+// This method returns false if the payout fails, which usually happens when the staker
+// has no delegators. In that case the caller must handle the rewards in some other way.
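+//
+// Illustrative only (the `delegationKeeper` field and the `pooltypes.ModuleName`
+// constant are assumed for this sketch and are not defined in this file): a payer
+// module could fall back as follows when there are no delegators:
+//
+//	if !k.delegationKeeper.PayoutRewards(ctx, staker, amount, pooltypes.ModuleName) {
+//		// payout failed because the staker has no delegators;
+//		// the caller has to handle `amount` in some other way
+//	}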
+func (k Keeper) PayoutRewards(ctx sdk.Context, staker string, amount uint64, payerModuleName string) (success bool) { + // Assert there are delegators + if k.DoesDelegationDataExist(ctx, staker) { + + // Add amount to the rewards pool + k.AddAmountToDelegationRewards(ctx, staker, amount) + + // Transfer tokens to the delegation module + err := util.TransferFromModuleToModule(k.bankKeeper, ctx, payerModuleName, types.ModuleName, amount) + if err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "Not enough tokens in module") + return false + } + return true + } + return false +} + +// SlashDelegators reduces the delegation of all delegators of `staker` by fraction +// and transfers the amount to the Treasury. +func (k Keeper) SlashDelegators(ctx sdk.Context, poolId uint64, staker string, slashType types.SlashType) { + // Only slash if staker has delegators + if k.DoesDelegationDataExist(ctx, staker) { + + // Update in-memory staker index for efficient queries + k.RemoveStakerIndex(ctx, staker) + defer k.SetStakerIndex(ctx, staker) + + // Perform F1-slash and get slashed amount in ukyve + slashedAmount := k.f1Slash(ctx, staker, k.getSlashFraction(ctx, slashType)) + + // Transfer tokens to the Treasury + if err := util.TransferFromModuleToTreasury(k.accountKeeper, k.distrKeeper, ctx, types.ModuleName, slashedAmount); err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "Not enough tokens in module") + } + + // Emit slash event + _ = ctx.EventManager().EmitTypedEvent(&types.EventSlash{ + PoolId: poolId, + Staker: staker, + Amount: slashedAmount, + SlashType: slashType, + }) + } +} + +// GetOutstandingRewards calculates the current rewards a delegator has collected for +// the given staker. +func (k Keeper) GetOutstandingRewards(ctx sdk.Context, staker string, delegator string) uint64 { + return k.f1GetOutstandingRewards(ctx, staker, delegator) +} diff --git a/x/delegation/keeper/getters_delegation_data.go b/x/delegation/keeper/getters_delegation_data.go new file mode 100644 index 00000000..f8c6fab8 --- /dev/null +++ b/x/delegation/keeper/getters_delegation_data.go @@ -0,0 +1,71 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// The `DelegationData` stores general aggregated variables for each existing staker +// as well as necessary variables needed for the F1 distribution algorithm +// Look at the proto-file for detailed explanation of the variables. +// Every staker with at least one delegator has this entry. + +// AddAmountToDelegationRewards adds the specified amount to the current delegationData object. 
+// This is needed by the F1-algorithm to calculate to outstanding rewards +func (k Keeper) AddAmountToDelegationRewards(ctx sdk.Context, stakerAddress string, amount uint64) { + delegationData, found := k.GetDelegationData(ctx, stakerAddress) + if found { + delegationData.CurrentRewards += amount + k.SetDelegationData(ctx, delegationData) + } +} + +// SetDelegationData set a specific delegationPoolData in the store from its index +func (k Keeper) SetDelegationData(ctx sdk.Context, delegationData types.DelegationData) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationDataKeyPrefix) + b := k.cdc.MustMarshal(&delegationData) + store.Set(types.DelegationDataKey(delegationData.Staker), b) +} + +// GetDelegationData returns a delegationData entry for a specific staker +// with `stakerAddress` +func (k Keeper) GetDelegationData(ctx sdk.Context, stakerAddress string) (val types.DelegationData, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationDataKeyPrefix) + + b := store.Get(types.DelegationDataKey(stakerAddress)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// DoesDelegationDataExist check if the staker with `stakerAddress` has +// a delegation data entry. This is the case if the staker as at least one delegator. +func (k Keeper) DoesDelegationDataExist(ctx sdk.Context, stakerAddress string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationDataKeyPrefix) + return store.Has(types.DelegationDataKey(stakerAddress)) +} + +// RemoveDelegationData removes a delegationData entry from the pool +func (k Keeper) RemoveDelegationData(ctx sdk.Context, stakerAddress string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationDataKeyPrefix) + store.Delete(types.DelegationDataKey(stakerAddress)) +} + +// GetAllDelegationData returns all delegationData entries +func (k Keeper) GetAllDelegationData(ctx sdk.Context) (list []types.DelegationData) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationDataKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.DelegationData + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/delegation/keeper/getters_delegation_entries.go b/x/delegation/keeper/getters_delegation_entries.go new file mode 100644 index 00000000..15dd6ec7 --- /dev/null +++ b/x/delegation/keeper/getters_delegation_entries.go @@ -0,0 +1,73 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// The `DelegationEntry` stores the quotient of the collected rewards +// and the total delegation of every period. A period is a phase +// where the total delegation was unchanged and just rewards were +// paid out. 
More details can be found in the specs of this module + +// SetDelegationEntry set a specific delegationEntry in the store for the staker +// and a given index +func (k Keeper) SetDelegationEntry(ctx sdk.Context, delegationEntries types.DelegationEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationEntriesKeyPrefix) + b := k.cdc.MustMarshal(&delegationEntries) + store.Set(types.DelegationEntriesKey( + delegationEntries.Staker, + delegationEntries.KIndex, + ), b) +} + +// GetDelegationEntry returns a delegationEntry from its index +func (k Keeper) GetDelegationEntry( + ctx sdk.Context, + stakerAddress string, + kIndex uint64, +) (val types.DelegationEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationEntriesKeyPrefix) + + b := store.Get(types.DelegationEntriesKey( + stakerAddress, + kIndex, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemoveDelegationEntry removes a delegationEntry for the given staker with the +// given index from the store +func (k Keeper) RemoveDelegationEntry( + ctx sdk.Context, + stakerAddress string, + kIndex uint64, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationEntriesKeyPrefix) + store.Delete(types.DelegationEntriesKey( + stakerAddress, + kIndex, + )) +} + +// GetAllDelegationEntries returns all delegationEntries (of all stakers) +func (k Keeper) GetAllDelegationEntries(ctx sdk.Context) (list []types.DelegationEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationEntriesKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.DelegationEntry + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/delegation/keeper/getters_delegation_slash.go b/x/delegation/keeper/getters_delegation_slash.go new file mode 100644 index 00000000..d3764ad5 --- /dev/null +++ b/x/delegation/keeper/getters_delegation_slash.go @@ -0,0 +1,90 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// The `DelegationSlash` entry stores every slash that happened to a staker. +// This is needed by the F1-Fee algorithm to correctly calculate the +// remaining delegation of delegators whose staker got slashed. + +// SetDelegationSlashEntry for the affected staker with the index of the period +// the slash is starting. +func (k Keeper) SetDelegationSlashEntry(ctx sdk.Context, slashEntry types.DelegationSlash) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationSlashEntriesKeyPrefix) + b := k.cdc.MustMarshal(&slashEntry) + store.Set(types.DelegationEntriesKey( + slashEntry.Staker, + slashEntry.KIndex, + ), b) +} + +// GetDelegationSlashEntry returns a DelegationSlash for the given staker and index. 
+func (k Keeper) GetDelegationSlashEntry( + ctx sdk.Context, + stakerAddress string, + kIndex uint64, +) (val types.DelegationSlash, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationSlashEntriesKeyPrefix) + + b := store.Get(types.DelegationSlashEntriesKey( + stakerAddress, + kIndex, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemoveDelegationSlashEntry removes an entry for a given staker and index +func (k Keeper) RemoveDelegationSlashEntry( + ctx sdk.Context, + stakerAddress string, + kIndex uint64, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationSlashEntriesKeyPrefix) + store.Delete(types.DelegationSlashEntriesKey( + stakerAddress, + kIndex, + )) +} + +// GetAllDelegationSlashEntries returns all delegation slash entries (of all stakers) +func (k Keeper) GetAllDelegationSlashEntries(ctx sdk.Context) (list []types.DelegationSlash) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationSlashEntriesKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.DelegationSlash + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetAllDelegationSlashesBetween returns all Slashes that happened between the given periods +// `start` and `end` are both inclusive. +func (k Keeper) GetAllDelegationSlashesBetween(ctx sdk.Context, staker string, start uint64, end uint64) (list []types.DelegationSlash) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegationSlashEntriesKeyPrefix) + + // use iterator with end+1 because the end of the iterator is exclusive + iterator := store.Iterator(util.GetByteKey(staker, start), util.GetByteKey(staker, end+1)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.DelegationSlash + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/delegation/keeper/getters_delegator.go b/x/delegation/keeper/getters_delegator.go new file mode 100644 index 00000000..7a55cf14 --- /dev/null +++ b/x/delegation/keeper/getters_delegator.go @@ -0,0 +1,104 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// `Delegator` is created for every delegator (address) that delegates +// to a staker. It stores the initial amount delegated and the index +// of the F1-period where the user started to become a delegator. +// When the user performs a redelegation this object is recreated. +// To query the current delegation use `GetDelegationAmountOfDelegator()` +// as the `initialAmount` does not consider slashes. 
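+//
+// Worked example (illustrative numbers): a delegator joins with initialAmount = 100
+// during period k. If the staker is later slashed with fraction 0.1, the current
+// delegation reported by GetDelegationAmountOfDelegator() is 100 * (1 - 0.1) = 90,
+// while initialAmount remains 100 until the Delegator object is recreated
+// (on a new delegation or a reward withdrawal).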
+ +// SetDelegator set a specific delegator in the store from its index +func (k Keeper) SetDelegator(ctx sdk.Context, delegator types.Delegator) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefix) + b := k.cdc.MustMarshal(&delegator) + store.Set(types.DelegatorKey( + delegator.Staker, + delegator.Delegator, + ), b) + + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefixIndex2) + indexStore.Set(types.DelegatorKeyIndex2( + delegator.Delegator, + delegator.Staker, + ), []byte{1}) +} + +// GetDelegator returns a delegator from its index +func (k Keeper) GetDelegator( + ctx sdk.Context, + stakerAddress string, + delegatorAddress string, +) (val types.Delegator, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefix) + b := store.Get(types.DelegatorKey(stakerAddress, delegatorAddress)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// DoesDelegatorExist checks if the key exists in the KV-store +func (k Keeper) DoesDelegatorExist( + ctx sdk.Context, + stakerAddress string, + delegatorAddress string, +) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefix) + return store.Has(types.DelegatorKey(stakerAddress, delegatorAddress)) +} + +// RemoveDelegator removes a delegator from the store +func (k Keeper) RemoveDelegator( + ctx sdk.Context, + stakerAddress string, + delegatorAddress string, +) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefix) + store.Delete(types.DelegatorKey( + stakerAddress, + delegatorAddress, + )) + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefixIndex2) + indexStore.Delete(types.DelegatorKeyIndex2( + delegatorAddress, + stakerAddress, + )) +} + +// GetAllDelegators returns all delegators (of all stakers) +func (k Keeper) GetAllDelegators(ctx sdk.Context) (list []types.Delegator) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Delegator + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +func (k Keeper) GetStakersByDelegator(ctx sdk.Context, delegator string) (list []string) { + delegatorStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.DelegatorKeyPrefixIndex2) + iterator := sdk.KVStorePrefixIterator(delegatorStore, util.GetByteKey(delegator)) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + staker := string(iterator.Key()[43 : 43+43]) + list = append(list, staker) + } + return +} diff --git a/x/delegation/keeper/getters_index_stakers.go b/x/delegation/keeper/getters_index_stakers.go new file mode 100644 index 00000000..7bfae3bc --- /dev/null +++ b/x/delegation/keeper/getters_index_stakers.go @@ -0,0 +1,158 @@ +package keeper + +import ( + "fmt" + "math" + "sort" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// arrayPagination helps to parse the query.PageRequest for an array +// instead of a KV-Store. 
+func arrayPagination(slice []string, pagination *query.PageRequest) ([]string, *query.PageResponse, error) { + if pagination != nil && pagination.Key != nil { + return nil, nil, fmt.Errorf("key pagination not supported") + } + + page, limit, err := query.ParsePagination(pagination) + if err != nil { + return nil, nil, err + } + + resultLength := util.MinInt(limit, len(slice)-page*limit) + result := make([]string, resultLength) + + for i := 0; i < resultLength; i++ { + result[i] = slice[page*limit+i] + } + + pageRes := &query.PageResponse{ + NextKey: nil, + Total: uint64(len(slice)), + } + + return result, pageRes, nil +} + +// arrayPaginationAccumulator helps to parse the query.PageRequest for an array +// instead of a KV-Store. +func arrayPaginationAccumulator(slice []string, pagination *query.PageRequest, accumulator func(address string, accumulate bool) bool) (*query.PageResponse, error) { + if pagination != nil && pagination.Key != nil { + return nil, fmt.Errorf("key pagination not supported") + } + + page, limit, err := query.ParsePagination(pagination) + if err != nil { + return nil, err + } + + count := 0 + minIndex := (page - 1) * limit + maxIndex := (page) * limit + + for i := 0; i < len(slice); i++ { + if accumulator(slice[i], count >= minIndex && count < maxIndex) { + count++ + } + } + + pageRes := &query.PageResponse{ + NextKey: nil, + Total: uint64(count), + } + + return pageRes, nil +} + +// SetStakerIndex sets and Index-entry which sorts all stakers (active and passive) +// by its total delegation +func (k Keeper) SetStakerIndex(ctx sdk.Context, staker string) { + amount := k.GetDelegationAmount(ctx, staker) + store := prefix.NewStore(ctx.KVStore(k.memKey), types.StakerIndexKeyPrefix) + store.Set(types.StakerIndexKey(math.MaxUint64-amount, staker), []byte{0}) +} + +// RemoveStakerIndex deletes and Index-entry which sorts all stakers (active and passive) +// by its total delegation +func (k Keeper) RemoveStakerIndex(ctx sdk.Context, staker string) { + amount := k.GetDelegationAmount(ctx, staker) + store := prefix.NewStore(ctx.KVStore(k.memKey), types.StakerIndexKeyPrefix) + store.Delete(types.StakerIndexKey(math.MaxUint64-amount, staker)) +} + +// GetPaginatedStakersByDelegation returns all stakers (active and inactive) +// sorted by its current total delegation. It supports the cosmos query.PageRequest pagination. +func (k Keeper) GetPaginatedStakersByDelegation(ctx sdk.Context, pagination *query.PageRequest, accumulator func(staker string, accumulate bool) bool) (*query.PageResponse, error) { + store := prefix.NewStore(ctx.KVStore(k.memKey), types.StakerIndexKeyPrefix) + + pageRes, err := query.FilteredPaginate(store, pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + address := string(key[8 : 8+43]) + return accumulator(address, accumulate), nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return pageRes, nil +} + +// GetPaginatedActiveStakersByDelegation returns all active stakers +// sorted by its current total delegation. It supports the cosmos query.PageRequest pagination. 
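+// Unlike GetPaginatedStakersByDelegation above, which iterates the in-memory
+// index (keyed by math.MaxUint64-amount so that iteration order is descending
+// by delegation), this variant sorts the active-staker list in memory on every
+// call before applying the pagination.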
+func (k Keeper) GetPaginatedActiveStakersByDelegation(ctx sdk.Context, pagination *query.PageRequest, accumulator func(staker string, accumulate bool) bool) (*query.PageResponse, error) { + activeStakers := k.stakersKeeper.GetActiveStakers(ctx) + + sort.Slice(activeStakers, func(i, j int) bool { + return k.GetDelegationAmount(ctx, activeStakers[i]) > k.GetDelegationAmount(ctx, activeStakers[j]) + }) + + pageRes, err := arrayPaginationAccumulator(activeStakers, pagination, accumulator) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return pageRes, nil +} + +// GetPaginatedInactiveStakersByDelegation returns all inactive stakers +// sorted by its current total delegation. It supports the cosmos query.PageRequest pagination. +func (k Keeper) GetPaginatedInactiveStakersByDelegation(ctx sdk.Context, pagination *query.PageRequest, accumulator func(staker string, accumulate bool) bool) (*query.PageResponse, error) { + store := prefix.NewStore(ctx.KVStore(k.memKey), types.StakerIndexKeyPrefix) + + pageRes, err := query.FilteredPaginate(store, pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + address := string(key[8 : 8+43]) + if k.stakersKeeper.GetPoolCount(ctx, address) > 0 { + return false, nil + } + return accumulator(address, accumulate), nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return pageRes, nil +} + +// GetPaginatedActiveStakersByPoolCountAndDelegation returns all active stakers +// sorted by the amount of pools they are participating. If the poolCount is equal +// they are sorted by current total delegation. It supports the cosmos query.PageRequest pagination. +func (k Keeper) GetPaginatedActiveStakersByPoolCountAndDelegation(ctx sdk.Context, pagination *query.PageRequest) ([]string, *query.PageResponse, error) { + activeStakers := k.stakersKeeper.GetActiveStakers(ctx) + sort.Slice(activeStakers, func(i, j int) bool { + pc_i := k.stakersKeeper.GetPoolCount(ctx, activeStakers[i]) + pc_j := k.stakersKeeper.GetPoolCount(ctx, activeStakers[j]) + + if pc_i == pc_j { + return k.GetDelegationAmount(ctx, activeStakers[i]) > k.GetDelegationAmount(ctx, activeStakers[j]) + } + return pc_i > pc_j + }) + + return arrayPagination(activeStakers, pagination) +} diff --git a/x/delegation/keeper/getters_params.go b/x/delegation/keeper/getters_params.go new file mode 100644 index 00000000..4099ba44 --- /dev/null +++ b/x/delegation/keeper/getters_params.go @@ -0,0 +1,69 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams returns the current x/delegation module parameters. 
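+// If no parameters have been stored yet, the zero-value Params object is returned.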
+func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + store := ctx.KVStore(k.storeKey) + + bz := store.Get(types.ParamsKey) + if bz == nil { + return params + } + + k.cdc.MustUnmarshal(bz, ¶ms) + return params +} + +// GetUnbondingDelegationTime returns the UnbondingDelegationTime param +func (k Keeper) GetUnbondingDelegationTime(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).UnbondingDelegationTime +} + +// GetRedelegationCooldown returns the RedelegationCooldown param +func (k Keeper) GetRedelegationCooldown(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).RedelegationCooldown +} + +// GetRedelegationMaxAmount returns the RedelegationMaxAmount param +func (k Keeper) GetRedelegationMaxAmount(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).RedelegationMaxAmount +} + +// GetVoteSlash returns the VoteSlash param +func (k Keeper) GetVoteSlash(ctx sdk.Context) (res string) { + return k.GetParams(ctx).VoteSlash +} + +// GetUploadSlash returns the UploadSlash param +func (k Keeper) GetUploadSlash(ctx sdk.Context) (res string) { + return k.GetParams(ctx).UploadSlash +} + +// GetTimeoutSlash returns the TimeoutSlash param +func (k Keeper) GetTimeoutSlash(ctx sdk.Context) (res string) { + return k.GetParams(ctx).TimeoutSlash +} + +func (k Keeper) getSlashFraction(ctx sdk.Context, slashType types.SlashType) (slashAmountRatio sdk.Dec) { + // Retrieve slash fraction from params + switch slashType { + case types.SLASH_TYPE_TIMEOUT: + slashAmountRatio, _ = sdk.NewDecFromStr(k.GetTimeoutSlash(ctx)) + case types.SLASH_TYPE_VOTE: + slashAmountRatio, _ = sdk.NewDecFromStr(k.GetVoteSlash(ctx)) + case types.SLASH_TYPE_UPLOAD: + slashAmountRatio, _ = sdk.NewDecFromStr(k.GetUploadSlash(ctx)) + } + return +} + +// SetParams sets the x/delegation module parameters. +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(¶ms) + store.Set(types.ParamsKey, bz) +} diff --git a/x/delegation/keeper/getters_redelegation.go b/x/delegation/keeper/getters_redelegation.go new file mode 100644 index 00000000..fdeb18be --- /dev/null +++ b/x/delegation/keeper/getters_redelegation.go @@ -0,0 +1,56 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetRedelegationCooldown ... +func (k Keeper) SetRedelegationCooldown(ctx sdk.Context, redelegationCooldown types.RedelegationCooldown) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RedelegationCooldownPrefix) + store.Set(types.RedelegationCooldownKey( + redelegationCooldown.Address, + redelegationCooldown.CreationDate, + ), []byte{1}) +} + +// GetRedelegationCooldownEntries ... +func (k Keeper) GetRedelegationCooldownEntries(ctx sdk.Context, delegatorAddress string) (creationDates []uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), append(types.RedelegationCooldownPrefix, util.GetByteKey(delegatorAddress)...)) + iterator := sdk.KVStorePrefixIterator(store, nil) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + creationDates = append(creationDates, binary.BigEndian.Uint64(iterator.Key()[0:8])) + } + return +} + +// RemoveRedelegationCooldown ... 
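+// It deletes the cooldown entry of `delegatorAddress` whose creation date
+// (the `block` argument) matches the key of an existing entry.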
+func (k Keeper) RemoveRedelegationCooldown(ctx sdk.Context, delegatorAddress string, block uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RedelegationCooldownPrefix) + store.Delete(types.RedelegationCooldownKey(delegatorAddress, block)) +} + +// GetAllRedelegationCooldownEntries ... +func (k Keeper) GetAllRedelegationCooldownEntries(ctx sdk.Context) (list []types.RedelegationCooldown) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RedelegationCooldownPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + val := types.RedelegationCooldown{ + Address: string(iterator.Key()[0:43]), + CreationDate: binary.BigEndian.Uint64(iterator.Key()[43:51]), + } + list = append(list, val) + } + + return +} diff --git a/x/delegation/keeper/getters_undelegation.go b/x/delegation/keeper/getters_undelegation.go new file mode 100644 index 00000000..524196ae --- /dev/null +++ b/x/delegation/keeper/getters_undelegation.go @@ -0,0 +1,111 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ##################### +// === QUEUE ENTRIES === +// ##################### + +// SetUndelegationQueueEntry ... +func (k Keeper) SetUndelegationQueueEntry(ctx sdk.Context, undelegationQueueEntry types.UndelegationQueueEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefix) + b := k.cdc.MustMarshal(&undelegationQueueEntry) + store.Set(types.UndelegationQueueKey( + undelegationQueueEntry.Index, + ), b) + + // Insert the same entry with a different key prefix for query lookup + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefixIndex2) + indexStore.Set(types.UndelegationQueueKeyIndex2( + undelegationQueueEntry.Delegator, + undelegationQueueEntry.Index, + ), []byte{}) +} + +// GetUndelegationQueueEntry ... +func (k Keeper) GetUndelegationQueueEntry(ctx sdk.Context, index uint64) (val types.UndelegationQueueEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefix) + + b := store.Get(types.UndelegationQueueKey(index)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemoveUndelegationQueueEntry ... 
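+// It removes the given entry from the undelegation queue itself as well as from
+// the delegator-keyed second index that is used for per-delegator lookups.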
+func (k Keeper) RemoveUndelegationQueueEntry(ctx sdk.Context, undelegationQueueEntry *types.UndelegationQueueEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefix) + store.Delete(types.UndelegationQueueKey(undelegationQueueEntry.Index)) + + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefixIndex2) + indexStore.Delete(types.UndelegationQueueKeyIndex2( + undelegationQueueEntry.Delegator, + undelegationQueueEntry.Index, + )) +} + +// GetAllUnbondingDelegationQueueEntries returns all delegator unbondings +func (k Keeper) GetAllUnbondingDelegationQueueEntries(ctx sdk.Context) (list []types.UndelegationQueueEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.UndelegationQueueKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.UndelegationQueueEntry + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetAllUnbondingDelegationQueueEntriesOfDelegator returns all delegator unbondings of the given address +func (k Keeper) GetAllUnbondingDelegationQueueEntriesOfDelegator(ctx sdk.Context, address string) (list []types.UndelegationQueueEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), util.GetByteKey(types.UndelegationQueueKeyPrefixIndex2, address)) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + index := binary.BigEndian.Uint64(iterator.Key()[0:8]) + + entry, _ := k.GetUndelegationQueueEntry(ctx, index) + list = append(list, entry) + } + + return +} + +// ################### +// === QUEUE STATE === +// ################### + +// GetQueueState returns the state for the undelegation queue +func (k Keeper) GetQueueState(ctx sdk.Context) (state types.QueueState) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + b := store.Get(types.QueueKey) + + if b == nil { + return state + } + + k.cdc.MustUnmarshal(b, &state) + return +} + +// SetQueueState saves the undelegation queue state +func (k Keeper) SetQueueState(ctx sdk.Context, state types.QueueState) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + b := k.cdc.MustMarshal(&state) + store.Set(types.QueueKey, b) +} diff --git a/x/delegation/keeper/grpc_query.go b/x/delegation/keeper/grpc_query.go new file mode 100644 index 00000000..2ea0edef --- /dev/null +++ b/x/delegation/keeper/grpc_query.go @@ -0,0 +1,21 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = Keeper{} + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/delegation/keeper/keeper.go b/x/delegation/keeper/keeper.go new file mode 100644 index 00000000..8e2e3d4b --- /dev/null +++ b/x/delegation/keeper/keeper.go @@ -0,0 +1,82 @@ +package keeper + +import ( + "fmt" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk 
"github.com/cosmos/cosmos-sdk/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + + authority string + + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + distrKeeper types.DistrKeeper + poolKeeper types.PoolKeeper + upgradeKeeper types.UpgradeKeeper + stakersKeeper types.StakersKeeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storetypes.StoreKey, + memKey storetypes.StoreKey, + + authority string, + + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, + distrkeeper types.DistrKeeper, + poolKeeper types.PoolKeeper, + upgradeKeeper types.UpgradeKeeper, + stakersKeeper types.StakersKeeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + + authority: authority, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + distrKeeper: distrkeeper, + poolKeeper: poolKeeper, + upgradeKeeper: upgradeKeeper, + stakersKeeper: stakersKeeper, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func (k Keeper) StoreKey() storetypes.StoreKey { + return k.storeKey +} + +var memStoreInitialized = false + +func (k Keeper) InitMemStore(gasCtx sdk.Context) { + if !memStoreInitialized { + + // Update mem index + noGasCtx := gasCtx.WithBlockGasMeter(sdk.NewInfiniteGasMeter()) + for _, entry := range k.GetAllDelegationData(noGasCtx) { + k.SetStakerIndex(noGasCtx, entry.Staker) + } + + memStoreInitialized = true + } +} diff --git a/x/delegation/keeper/keeper_suite_test.go b/x/delegation/keeper/keeper_suite_test.go new file mode 100644 index 00000000..37185cbb --- /dev/null +++ b/x/delegation/keeper/keeper_suite_test.go @@ -0,0 +1,61 @@ +package keeper_test + +import ( + "fmt" + "testing" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestDelegationKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} + +func PayoutRewards(s *i.KeeperTestSuite, staker string, amount uint64) { + err := s.App().PoolKeeper.ChargeFundersOfPool(s.Ctx(), 0, amount) + Expect(err).To(BeNil()) + success := s.App().DelegationKeeper.PayoutRewards(s.Ctx(), staker, amount, pooltypes.ModuleName) + Expect(success).To(BeTrue()) +} + +func CreateFundedPool(s *i.KeeperTestSuite) { + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.CommitAfterSeconds(7) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.CommitAfterSeconds(7) + + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal(100 * i.KYVE)) +} + +func CheckAndContinueChainForOneMonth(s *i.KeeperTestSuite) { + s.PerformValidityChecks() + + for d := 0; d < 31; d++ { + s.CommitAfterSeconds(60 * 60 * 24) + s.PerformValidityChecks() + } +} diff --git a/x/delegation/keeper/logic_delegation.go b/x/delegation/keeper/logic_delegation.go new file mode 100644 index 00000000..2ce9331e --- /dev/null +++ b/x/delegation/keeper/logic_delegation.go @@ -0,0 +1,73 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Delegate performs a safe delegation with all necessary checks +// Warning: does not transfer the amount (only the rewards) +func (k Keeper) performDelegation(ctx sdk.Context, stakerAddress string, delegatorAddress string, amount uint64) { + // Update in-memory staker index for efficient queries + k.RemoveStakerIndex(ctx, stakerAddress) + defer k.SetStakerIndex(ctx, stakerAddress) + + if k.DoesDelegatorExist(ctx, stakerAddress, delegatorAddress) { + // If the sender is already a delegator, first perform an undelegation, before delegating. + // "perform a withdrawal" + _ = k.performWithdrawal(ctx, stakerAddress, delegatorAddress) + + // Perform delegation by fully undelegating and then delegating the new amount + unDelegateAmount := k.f1RemoveDelegator(ctx, stakerAddress, delegatorAddress) + newDelegationAmount := unDelegateAmount + amount + k.f1CreateDelegator(ctx, stakerAddress, delegatorAddress, newDelegationAmount) + } else { + // If the sender isn't a delegator, simply create a new delegation entry. + k.f1CreateDelegator(ctx, stakerAddress, delegatorAddress, amount) + } +} + +// performUndelegation performs immediately an undelegation of the given amount from the given staker +// If the amount is greater than the available amount, only the available amount will be undelegated. +// This method also transfers the rewards back to the given user. +func (k Keeper) performUndelegation(ctx sdk.Context, stakerAddress string, delegatorAddress string, amount uint64) uint64 { + // Update in-memory staker index for efficient queries + k.RemoveStakerIndex(ctx, stakerAddress) + defer k.SetStakerIndex(ctx, stakerAddress) + + // Withdraw all outstanding rewards + k.performWithdrawal(ctx, stakerAddress, delegatorAddress) + + // Perform an internal re-delegation. 
+ undelegatedAmount := k.f1RemoveDelegator(ctx, stakerAddress, delegatorAddress) + + redelegation := uint64(0) + if undelegatedAmount > amount { + // if user didnt undelegate everything ... + redelegation = undelegatedAmount - amount + // ... create a new delegator entry with the remaining amount + k.f1CreateDelegator(ctx, stakerAddress, delegatorAddress, redelegation) + } + + return undelegatedAmount - redelegation +} + +// performWithdrawal withdraws all pending rewards from a user and transfers it. +// The amount is returned by the function. +func (k Keeper) performWithdrawal(ctx sdk.Context, stakerAddress, delegatorAddress string) uint64 { + reward := k.f1WithdrawRewards(ctx, stakerAddress, delegatorAddress) + err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, delegatorAddress, reward) + if err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "no money left in module") + } + + // Emit withdraw event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventWithdrawRewards{ + Address: delegatorAddress, + Staker: stakerAddress, + Amount: reward, + }) + + return reward +} diff --git a/x/delegation/keeper/logic_f1distribution.go b/x/delegation/keeper/logic_f1distribution.go new file mode 100644 index 00000000..c00d36e5 --- /dev/null +++ b/x/delegation/keeper/logic_f1distribution.go @@ -0,0 +1,340 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +/* +This file is responsible for implementing the F1-Fee distribution as described in +https://drops.dagstuhl.de/opus/volltexte/2020/11974/pdf/OASIcs-Tokenomics-2019-10.pdf + +We recommend reading the paper first before reading the code. +This file covers all relevant methods to fully implement the algorithm. +It also takes fully care of the entire state. The only interaction needed +is covered by the available methods. +*/ + +// f1StartNewPeriod finishes the current period according to the F1-Paper +// It returns the index of the new period. +// delegationData is passed as a pointer and updated in this method +// it's the responsibility of the caller to save the meta-data state. +// This method only writes to the entries. 
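+//
+// In the notation of the F1 paper the entry written for the finished period f is
+//
+//	Entry_f = Entry_{f-1} + T_f / n_f
+//
+// where T_f are the rewards collected during the period and n_f is the total
+// delegation that stayed constant throughout it.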
+func (k Keeper) f1StartNewPeriod(ctx sdk.Context, staker string, delegationData *types.DelegationData) uint64 { + // Ending the current period is performed by getting the entry + // of the previous index and adding the current quotient of + // $T_f / n_f$ + + // Get previous entry + // F1: corresponds to $Entry_{f-1}$ + previousEntry, found := k.GetDelegationEntry(ctx, staker, delegationData.LatestIndexK) + if !found { + previousEntry.Value = sdk.NewDec(0) + } + + // Calculate quotient of current period + // If totalDelegation is zero the quotient is also zero + currentPeriodValue := sdk.NewDec(0) + if delegationData.TotalDelegation != 0 { + decCurrentRewards := sdk.NewDec(int64(delegationData.CurrentRewards)) + decTotalDelegation := sdk.NewDec(int64(delegationData.TotalDelegation)) + + // F1: $T_f / n_f$ + currentPeriodValue = decCurrentRewards.Quo(decTotalDelegation) + } + + // Add previous entry to current one + currentPeriodValue = currentPeriodValue.Add(previousEntry.Value) + + // Increment index for the next period + indexF := delegationData.LatestIndexK + 1 + + // Add entry for new period to KV-Store + k.SetDelegationEntry(ctx, types.DelegationEntry{ + Value: currentPeriodValue, + Staker: staker, + KIndex: indexF, + }) + + // Reset the rewards for the next period back to zero + // and update to the new index + delegationData.CurrentRewards = 0 + delegationData.LatestIndexK = indexF + + if delegationData.LatestIndexWasUndelegation { + k.RemoveDelegationEntry(ctx, previousEntry.Staker, previousEntry.KIndex) + delegationData.LatestIndexWasUndelegation = false + } + + return indexF +} + +// f1CreateDelegator creates a new delegator within the f1-logic. +// It is assumed that no delegator exists. +func (k Keeper) f1CreateDelegator(ctx sdk.Context, staker string, delegator string, amount uint64) { + if amount == 0 { + return + } + + // Fetch metadata + delegationData, found := k.GetDelegationData(ctx, staker) + + // Init default data-set, if this is the first delegator + if !found { + delegationData = types.DelegationData{ + Staker: staker, + } + } + + // Finish current period + k.f1StartNewPeriod(ctx, staker, &delegationData) + + // Update metadata + delegationData.TotalDelegation += amount + delegationData.DelegatorCount += 1 + k.SetDelegationData(ctx, delegationData) + + k.SetDelegator(ctx, types.Delegator{ + Staker: staker, + Delegator: delegator, + InitialAmount: amount, + KIndex: delegationData.LatestIndexK, + }) +} + +// f1RemoveDelegator performs a full undelegation and removes the delegator from the f1-logic +// This method returns the amount of tokens that got undelegated +// Due to slashing the undelegated amount can be lower than the initial delegated amount +func (k Keeper) f1RemoveDelegator(ctx sdk.Context, stakerAddress string, delegatorAddress string) (amount uint64) { + // Check if delegator exists + delegator, found := k.GetDelegator(ctx, stakerAddress, delegatorAddress) + if !found { + return 0 + } + + // Fetch metadata + delegationData, found := k.GetDelegationData(ctx, stakerAddress) + if !found { + // Should never happen, if so there is an error in the f1-implementation + util.PanicHalt(k.upgradeKeeper, ctx, "No delegationData although somebody is delegating") + } + + balance := k.f1GetCurrentDelegation(ctx, stakerAddress, delegatorAddress) + + // Start new period + k.f1StartNewPeriod(ctx, stakerAddress, &delegationData) + + delegationData.LatestIndexWasUndelegation = true + + // Update Metadata + delegationData.TotalDelegation -= balance + 
delegationData.DelegatorCount -= 1 + + // Remove Delegator + k.RemoveDelegator(ctx, delegator.Staker, delegator.Delegator) + // Remove old entry + k.RemoveDelegationEntry(ctx, stakerAddress, delegator.KIndex) + + // Final cleanup + if delegationData.DelegatorCount == 0 { + k.RemoveDelegationEntry(ctx, stakerAddress, delegationData.LatestIndexK) + } + k.SetDelegationData(ctx, delegationData) + + return balance +} + +// f1Slash performs a slash within the f1-logic. +// It ends the current period and starts a new one with reduced total delegation. +// A slash entry is created which is needed to calculate the correct delegation amount +// of every delegator. +func (k Keeper) f1Slash(ctx sdk.Context, stakerAddress string, fraction sdk.Dec) (amount uint64) { + delegationData, _ := k.GetDelegationData(ctx, stakerAddress) + + // Finish current period because in the new one there will be + // a reduced total delegation for the slashed staker + // The slash will be accounted to the period with index `slashedIndex` + slashedIndex := k.f1StartNewPeriod(ctx, stakerAddress, &delegationData) + + k.SetDelegationSlashEntry(ctx, types.DelegationSlash{ + Staker: stakerAddress, + KIndex: slashedIndex, + Fraction: fraction, + }) + + // remaining_total_delegation = total_delegation * (1 - fraction) + totalDelegation := sdk.NewDec(int64(delegationData.TotalDelegation)) + slashedAmount := totalDelegation.Mul(fraction).TruncateInt().Uint64() + + // Remove slashed amount from delegation metadata + delegationData.TotalDelegation -= slashedAmount + k.SetDelegationData(ctx, delegationData) + + return slashedAmount +} + +// f1WithdrawRewards calculates all outstanding rewards and withdraws them from +// the f1-logic. A new period starts. +func (k Keeper) f1WithdrawRewards(ctx sdk.Context, stakerAddress string, delegatorAddress string) (rewards uint64) { + delegator, found := k.GetDelegator(ctx, stakerAddress, delegatorAddress) + if !found { + return 0 + } + + // Fetch metadata + delegationData, found := k.GetDelegationData(ctx, stakerAddress) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "No delegationData although somebody is delegating") + } + + // End current period and use it for calculating the reward + endIndex := k.f1StartNewPeriod(ctx, stakerAddress, &delegationData) + k.SetDelegationData(ctx, delegationData) + + // According to F1 the reward is calculated as the difference between two entries multiplied by the + // delegation amount for the period. + // To incorporate slashing one needs to iterate all slashes and calculate the reward for every period + // separately and then sum it. + reward := sdk.NewDec(0) + k.f1IterateConstantDelegationPeriods(ctx, stakerAddress, delegatorAddress, delegator.KIndex, endIndex, + func(startIndex uint64, endIndex uint64, delegation sdk.Dec) { + // entry difference + difference := k.f1GetEntryDifference(ctx, stakerAddress, startIndex, endIndex) + + periodReward := difference.Mul(delegation) + + reward = reward.Add(periodReward) + }) + + // Delete Delegator entry as he has no outstanding rewards anymore. + // To account for slashes, also update the initial amount. + k.RemoveDelegationEntry(ctx, stakerAddress, delegator.KIndex) + // Delegator now starts at the latest index. 
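+	// The delegator is re-anchored at `endIndex` with its post-slash balance, so the
+	// next withdrawal only covers periods created after this point.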
+ delegator.KIndex = endIndex + delegator.InitialAmount = k.f1GetCurrentDelegation(ctx, delegator.Staker, delegator.Delegator) + k.SetDelegator(ctx, delegator) + + return reward.TruncateInt().Uint64() +} + +// f1IterateConstantDelegationPeriods iterates all periods between minIndex and maxIndex (both inclusive) +// and calls handler() for every period with constant delegation amount +// This method iterates all slashes and additionally calls handler at least once if no slashes occurred +func (k Keeper) f1IterateConstantDelegationPeriods(ctx sdk.Context, stakerAddress string, delegatorAddress string, + minIndex uint64, maxIndex uint64, handler func(startIndex uint64, endIndex uint64, delegation sdk.Dec), +) { + slashes := k.GetAllDelegationSlashesBetween(ctx, stakerAddress, minIndex, maxIndex) + + delegator, _ := k.GetDelegator(ctx, stakerAddress, delegatorAddress) + delegatorBalance := sdk.NewDec(int64(delegator.InitialAmount)) + + if len(slashes) == 0 { + handler(minIndex, maxIndex, delegatorBalance) + return + } + + prevIndex := minIndex + for _, slash := range slashes { + handler(prevIndex, slash.KIndex, delegatorBalance) + slashedAmount := delegatorBalance.MulTruncate(slash.Fraction) + delegatorBalance = delegatorBalance.Sub(slashedAmount) + prevIndex = slash.KIndex + } + handler(prevIndex, maxIndex, delegatorBalance) +} + +// f1GetCurrentDelegation calculates the current delegation of a delegator. +// I.e. the initial amount minus the slashes +func (k Keeper) f1GetCurrentDelegation(ctx sdk.Context, stakerAddress string, delegatorAddress string) uint64 { + delegator, found := k.GetDelegator(ctx, stakerAddress, delegatorAddress) + if !found { + return 0 + } + + // Fetch metadata + delegationData, found := k.GetDelegationData(ctx, stakerAddress) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "No delegationData although somebody is delegating") + } + + latestBalance := sdk.NewDec(int64(delegator.InitialAmount)) + k.f1IterateConstantDelegationPeriods(ctx, stakerAddress, delegatorAddress, delegator.KIndex, delegationData.LatestIndexK, + func(startIndex uint64, endIndex uint64, delegation sdk.Dec) { + latestBalance = delegation + }) + + return latestBalance.TruncateInt().Uint64() +} + +// f1GetOutstandingRewards calculates the current outstanding rewards without modifying the f1-state. +// This method can be used for queries. +func (k Keeper) f1GetOutstandingRewards(ctx sdk.Context, stakerAddress string, delegatorAddress string) uint64 { + delegator, found := k.GetDelegator(ctx, stakerAddress, delegatorAddress) + if !found { + return 0 + } + + // Fetch metadata + delegationData, found := k.GetDelegationData(ctx, stakerAddress) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "No delegationData although somebody is delegating") + } + + // End current period and use it for calculating the reward + endIndex := delegationData.LatestIndexK + + // According to F1 the reward is calculated as the difference between two entries multiplied by the + // delegation amount for the period. + // To incorporate slashing one needs to iterate all slashes and calculate the reward for every period + // separately and then sum it. 
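+	// In formula form (illustrative notation):
+	//
+	//	reward = sum over constant-delegation spans [a, b]:
+	//	         (Entry_b - Entry_a) * delegation_during_span
+	//
+	// plus the share of the still-open period, which is added at the end of this
+	// function.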
+ reward := sdk.NewDec(0) + latestBalance := sdk.NewDec(int64(delegator.InitialAmount)) + k.f1IterateConstantDelegationPeriods(ctx, stakerAddress, delegatorAddress, delegator.KIndex, endIndex, + func(startIndex uint64, endIndex uint64, delegation sdk.Dec) { + difference := k.f1GetEntryDifference(ctx, stakerAddress, startIndex, endIndex) + // Multiply with delegation for period + periodReward := difference.Mul(delegation) + // Add to total rewards + reward = reward.Add(periodReward) + + // For calculating the last (ongoing) period + latestBalance = delegation + }) + + // Append missing rewards from last period to ongoing period + entry, found := k.GetDelegationEntry(ctx, stakerAddress, delegationData.LatestIndexK) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "Entry does not exist") + } + _ = entry + + currentPeriodValue := sdk.NewDec(0) + if delegationData.TotalDelegation != 0 { + decCurrentRewards := sdk.NewDec(int64(delegationData.CurrentRewards)) + decTotalDelegation := sdk.NewDec(int64(delegationData.TotalDelegation)) + + // F1: $T_f / n_f$ + currentPeriodValue = decCurrentRewards.Quo(decTotalDelegation) + } + + ongoingPeriodReward := currentPeriodValue.Mul(latestBalance) + + reward = reward.Add(ongoingPeriodReward) + return reward.TruncateInt().Uint64() +} + +func (k Keeper) f1GetEntryDifference(ctx sdk.Context, stakerAddress string, lowIndex uint64, highIndex uint64) sdk.Dec { + // entry difference + firstEntry, found := k.GetDelegationEntry(ctx, stakerAddress, lowIndex) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "Entry 1 does not exist") + } + + secondEntry, found := k.GetDelegationEntry(ctx, stakerAddress, highIndex) + if !found { + util.PanicHalt(k.upgradeKeeper, ctx, "Entry 2 does not exist") + } + + return secondEntry.Value.Sub(firstEntry.Value) +} diff --git a/x/delegation/keeper/logic_redelegation.go b/x/delegation/keeper/logic_redelegation.go new file mode 100644 index 00000000..945ff381 --- /dev/null +++ b/x/delegation/keeper/logic_redelegation.go @@ -0,0 +1,46 @@ +package keeper + +import ( + sdkErrors "cosmossdk.io/errors" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// consumeRedelegationSpell checks if the user has still redelegation spells +// available. If so, one spell is used and set on a cooldown. +// If all slots are currently on cooldown the function returns an error +func (k Keeper) consumeRedelegationSpell(ctx sdk.Context, address string) error { + // Check if cooldowns are over, + // Remove all expired entries + for _, creationDate := range k.GetRedelegationCooldownEntries(ctx, address) { + if ctx.BlockTime().Unix()-int64(creationDate) > int64(k.GetRedelegationCooldown(ctx)) { + k.RemoveRedelegationCooldown(ctx, address, creationDate) + } else { + break + } + } + + // Get list of active cooldowns + creationDates := k.GetRedelegationCooldownEntries(ctx, address) + + // Check if there are still free slots + if len(creationDates) >= int(k.GetRedelegationMaxAmount(ctx)) { + return sdkErrors.Wrapf(errorsTypes.ErrLogic, types.ErrRedelegationOnCooldown.Error()) + } + + // Check that no Redelegation occurred in this block, as it will lead to errors, as + // the block-time is used for an index key. 
+ if len(creationDates) > 0 && creationDates[len(creationDates)-1] == uint64(ctx.BlockTime().Unix()) { + return sdkErrors.Wrapf(errorsTypes.ErrLogic, types.ErrMultipleRedelegationInSameBlock.Error()) + } + + // All checks passed, create cooldown entry + k.SetRedelegationCooldown(ctx, types.RedelegationCooldown{ + Address: address, + CreationDate: uint64(ctx.BlockTime().Unix()), + }) + + return nil +} diff --git a/x/delegation/keeper/logic_unbonding.go b/x/delegation/keeper/logic_unbonding.go new file mode 100644 index 00000000..4cf886ee --- /dev/null +++ b/x/delegation/keeper/logic_unbonding.go @@ -0,0 +1,87 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// StartUnbondingDelegator creates a queue entry to schedule the unbonding. +// After the DelegationTime is reached the actual unbonding will be performed +// The actual unbonding is then performed by `func ProcessDelegatorUnbondingQueue(...)` +func (k Keeper) StartUnbondingDelegator(ctx sdk.Context, staker string, delegatorAddress string, amount uint64) { + // the queue is ordered by time + queueState := k.GetQueueState(ctx) + + // Increase topIndex as a new entry is about to be appended + queueState.HighIndex += 1 + + k.SetQueueState(ctx, queueState) + + // UnbondingEntry stores all the information which are needed to perform + // the undelegation at the end of the unbonding time + undelegationQueueEntry := types.UndelegationQueueEntry{ + Delegator: delegatorAddress, + Index: queueState.HighIndex, + Staker: staker, + Amount: amount, + CreationTime: uint64(ctx.BlockTime().Unix()), + } + + k.SetUndelegationQueueEntry(ctx, undelegationQueueEntry) +} + +// ProcessDelegatorUnbondingQueue is called in the end block and +// checks the queue for entries that have surpassed the unbonding time. +// If the unbonding time is reached, the actual unbonding is performed +// and the entry is removed from the queue. +func (k Keeper) ProcessDelegatorUnbondingQueue(ctx sdk.Context) { + // Get Queue information + queueState := k.GetQueueState(ctx) + + // flag for computing every entry at the end of the queue which is due. + // start processing the end of the queue + for continueProcessing := true; continueProcessing; { + continueProcessing = false + + // Get end of queue + undelegationEntry, found := k.GetUndelegationQueueEntry(ctx, queueState.LowIndex+1) + + if !found { + if queueState.LowIndex < queueState.HighIndex { + queueState.LowIndex += 1 + continueProcessing = true + } + } else + // Check if unbonding time is over + if undelegationEntry.CreationTime+k.GetUnbondingDelegationTime(ctx) <= uint64(ctx.BlockTime().Unix()) { + + // Perform undelegation and save undelegated amount to then transfer back to the user + undelegatedAmount := k.performUndelegation(ctx, undelegationEntry.Staker, undelegationEntry.Delegator, undelegationEntry.Amount) + + // Transfer the money + if err := util.TransferFromModuleToAddress( + k.bankKeeper, + ctx, + types.ModuleName, + undelegationEntry.Delegator, + undelegatedAmount, + ); err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "Not enough money in delegation module - logic_unbonding") + } + + // Emit a delegation event. 
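+			// The emitted amount is the value that was actually paid out, which can be
+			// lower than the amount originally requested if the staker was slashed while
+			// the entry was waiting in the queue.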
+ _ = ctx.EventManager().EmitTypedEvent(&types.EventUndelegate{ + Address: undelegationEntry.Delegator, + Staker: undelegationEntry.Staker, + Amount: undelegatedAmount, + }) + + k.RemoveUndelegationQueueEntry(ctx, &undelegationEntry) + + continueProcessing = true + queueState.LowIndex += 1 + } + } + k.SetQueueState(ctx, queueState) +} diff --git a/x/delegation/keeper/msg_server.go b/x/delegation/keeper/msg_server.go new file mode 100644 index 00000000..0db7fbf1 --- /dev/null +++ b/x/delegation/keeper/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/delegation/types" +) + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/delegation/keeper/msg_server_delegate.go b/x/delegation/keeper/msg_server_delegate.go new file mode 100644 index 00000000..b51dcc07 --- /dev/null +++ b/x/delegation/keeper/msg_server_delegate.go @@ -0,0 +1,40 @@ +package keeper + +import ( + "context" + + sdkErrors "cosmossdk.io/errors" + + "github.com/KYVENetwork/chain/util" + + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Delegate handles the transaction of delegating a specific amount of $KYVE to a staker +// The only requirement for the transaction to succeed is that the staker exists +// and the user has enough balance. +func (k msgServer) Delegate(goCtx context.Context, msg *types.MsgDelegate) (*types.MsgDelegateResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if !k.stakersKeeper.DoesStakerExist(ctx, msg.Staker) { + return nil, sdkErrors.WithType(types.ErrStakerDoesNotExist, msg.Staker) + } + + // Performs logical delegation without transferring the amount + k.performDelegation(ctx, msg.Staker, msg.Creator, msg.Amount) + + // Transfer tokens from sender to this module. + if transferErr := util.TransferFromAddressToModule(k.bankKeeper, ctx, msg.Creator, types.ModuleName, msg.Amount); transferErr != nil { + return nil, transferErr + } + + // Emit a delegation event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventDelegate{ + Address: msg.Creator, + Staker: msg.Staker, + Amount: msg.Amount, + }) + + return &types.MsgDelegateResponse{}, nil +} diff --git a/x/delegation/keeper/msg_server_delegate_test.go b/x/delegation/keeper/msg_server_delegate_test.go new file mode 100644 index 00000000..9160f14d --- /dev/null +++ b/x/delegation/keeper/msg_server_delegate_test.go @@ -0,0 +1,295 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_delegate.go + +* Delegate 10 $KYVE to ALICE +* Delegate additional 50 $KYVE to ALICE +* Try delegating to non-existent staker +* Delegate more than available +* Payout delegators +* Don't pay out rewards twice +* Delegate to validator with 0 $KYVE +* TODO(@max): Delegate to multiple validators + +*/ + +var _ = Describe("msg_server_delegate.go", Ordered, func() { + s := i.NewCleanChain() + + const aliceSelfDelegation = 100 * i.KYVE + const bobSelfDelegation = 200 * i.KYVE + + BeforeEach(func() { + s = i.NewCleanChain() + + CreateFundedPool(s) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.ALICE, + Amount: aliceSelfDelegation, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.BOB, + Amount: bobSelfDelegation, + }) + + _, stakerFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.ALICE) + Expect(stakerFound).To(BeTrue()) + + s.CommitAfterSeconds(7) + }) + + AfterEach(func() { + CheckAndContinueChainForOneMonth(s) + }) + + It("Delegate 10 $KYVE to ALICE", func() { + // ARRANGE + bobBalance := s.GetBalanceFromAddress(i.BOB) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.BOB, + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + // ASSERT + CheckAndContinueChainForOneMonth(s) + bobBalanceAfter := s.GetBalanceFromAddress(i.BOB) + Expect(bobBalanceAfter).To(Equal(bobBalance - 10*i.KYVE)) + + aliceDelegation := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegation).To(Equal(10*i.KYVE + aliceSelfDelegation)) + }) + + It("Delegate 10 $KYVE to ALICE and then another 50 $KYVE", func() { + // ARRANGE + bobBalance := s.GetBalanceFromAddress(i.BOB) + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.BOB, + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + CheckAndContinueChainForOneMonth(s) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.BOB, + Staker: i.ALICE, + Amount: 50 * i.KYVE, + }) + + // ASSERT + CheckAndContinueChainForOneMonth(s) + bobBalanceAfter := s.GetBalanceFromAddress(i.BOB) + Expect(bobBalanceAfter).To(Equal(bobBalance - 60*i.KYVE)) + + aliceDelegation := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegation).To(Equal(60*i.KYVE + aliceSelfDelegation)) + }) + + It("Try delegating to non-existent staker", func() { + // ARRANGE + bobBalance := s.GetBalanceFromAddress(i.BOB) + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorError(&types.MsgDelegate{ + Creator: i.BOB, + Staker: i.CHARLIE, + Amount: 10 * i.KYVE, + }) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.BOB)).To(Equal(bobBalance)) + + aliceDelegation := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegation).To(Equal(aliceSelfDelegation)) + }) + + It("Delegate more than available", func() { + // ARRANGE + bobBalance := s.GetBalanceFromAddress(i.BOB) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorError(&types.MsgDelegate{ + Creator: i.BOB, + Staker: i.ALICE, + Amount: bobBalance + 1, + }) + + // ASSERT + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter)) + 
+ bobBalanceAfter := s.GetBalanceFromAddress(i.BOB) + Expect(bobBalanceAfter).To(Equal(bobBalance)) + }) + + It("Payout delegators", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 100 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 209 * i.KYVE, + }) + + poolModuleBalance := s.GetBalanceFromModule(pooltypes.ModuleName) + + Expect(poolModuleBalance).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(BeZero()) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(BeZero()) + + s.PerformValidityChecks() + + // ACT + PayoutRewards(s, i.ALICE, 10*i.KYVE) + + // ASSERT + + // Name amount shares + // Alice: 100 100/(409) * 10 * 1e9 = 2.444.987.775 + // Dummy0: 100 100/(409) * 10 * 1e9 = 2.444.987.775 + // Dummy1: 209 209/(409) * 10 * 1e9 = 5.110.024.449 + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.ALICE)).To(Equal(uint64(2_444_987_775))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(2_444_987_775))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(5_110_024_449))) + + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + }) + + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.ALICE)).To(Equal(uint64(2_444_987_775))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(0))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(5_110_024_449))) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(uint64(900*i.KYVE + 2_444_987_775))) + Expect(s.GetBalanceFromModule(pooltypes.ModuleName)).To(Equal(90 * i.KYVE)) + Expect(s.GetBalanceFromModule(types.ModuleName)).To(Equal((200+409)*i.KYVE + uint64(2_444_987_775+5_110_024_449+1))) + }) + + It("Don't pay out rewards twice", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 100 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 200 * i.KYVE, + }) + + poolModuleBalance := s.GetBalanceFromModule(pooltypes.ModuleName) + + Expect(poolModuleBalance).To(Equal(100 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(BeZero()) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(BeZero()) + + // ACT + PayoutRewards(s, i.ALICE, 10*i.KYVE) + + // ASSERT + + // Alice: 100 + // Dummy0: 100 + // Dummy1: 200 + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(2_500_000_000))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(5_000_000_000))) + + s.PerformValidityChecks() + + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + }) + + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + }) + + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(0))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(5_000_000_000))) + + 
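// Reward-split sketch (editor's illustration): with 400 $KYVE of total
// delegation behind ALICE (100 self + 100 + 200) and a 10 $KYVE payout,
// each delegator receives delegation * payout / total:
//   DUMMY[0]: 100/400 * 10e9 = 2_500_000_000
//   DUMMY[1]: 200/400 * 10e9 = 5_000_000_000
// The second withdrawal above must therefore be a no-op, and DUMMY[0]'s
// balance below includes the reward exactly once.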
Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(uint64(900*i.KYVE + 2_500_000_000))) + Expect(s.GetBalanceFromModule(pooltypes.ModuleName)).To(Equal(90 * i.KYVE)) + Expect(s.GetBalanceFromModule(types.ModuleName)).To(Equal(600*i.KYVE + 7_500_000_000)) + }) + + It("Delegate to validator with 0 $KYVE", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.CHARLIE, + Amount: 0, + }) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.CHARLIE, + Amount: 200 * i.KYVE, + }) + + // ASSERT + s.PerformValidityChecks() + + poolModuleBalance := s.GetBalanceFromModule(types.ModuleName) + Expect(poolModuleBalance).To(Equal(200*i.KYVE + aliceSelfDelegation + bobSelfDelegation)) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(800 * i.KYVE)) + + charlieDelegation := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.CHARLIE) + Expect(charlieDelegation).To(Equal(200 * i.KYVE)) + }) + + It("Delegate to multiple validators", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.CHARLIE, + Amount: 0, + }) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.CHARLIE, + Amount: 200 * i.KYVE, + }) + + // ASSERT + s.PerformValidityChecks() + + poolModuleBalance := s.GetBalanceFromModule(types.ModuleName) + Expect(poolModuleBalance).To(Equal(200*i.KYVE + aliceSelfDelegation + bobSelfDelegation)) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(800 * i.KYVE)) + + charlieDelegation := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.CHARLIE) + Expect(charlieDelegation).To(Equal(200 * i.KYVE)) + }) +}) diff --git a/x/delegation/keeper/msg_server_redelegate.go b/x/delegation/keeper/msg_server_redelegate.go new file mode 100644 index 00000000..44ba7f8c --- /dev/null +++ b/x/delegation/keeper/msg_server_redelegate.go @@ -0,0 +1,60 @@ +package keeper + +import ( + "context" + + sdkErrors "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Redelegate lets a user redelegate from one staker to another staker +// The user has N redelegation spells. When this transaction is executed +// one spell is used. When all spells are consumed the transaction fails. +// The user then needs to wait for the oldest spell to expire to call +// this transaction again. +// It's only possible to redelegate to stakers which are at least in one pool. +func (k msgServer) Redelegate(goCtx context.Context, msg *types.MsgRedelegate) (*types.MsgRedelegateResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Check if the sender is a delegator + if !k.DoesDelegatorExist(ctx, msg.FromStaker, msg.Creator) { + return nil, sdkErrors.WithType(types.ErrNotADelegator, msg.FromStaker) + } + + // Check if destination staker exists + if !k.stakersKeeper.DoesStakerExist(ctx, msg.ToStaker) { + return nil, sdkErrors.WithType(types.ErrStakerDoesNotExist, msg.ToStaker) + } + + if len(k.stakersKeeper.GetValaccountsFromStaker(ctx, msg.ToStaker)) == 0 { + return nil, sdkErrors.WithType(types.ErrRedelegationToInactiveStaker, msg.ToStaker) + } + + // Check if the sender is trying to undelegate more than he has delegated. 
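// (Editor's note: "undelegate" here means the amount moved away from
// FromStaker; a redelegation may not exceed the sender's current delegation
// to FromStaker.)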
+ if delegationAmount := k.GetDelegationAmountOfDelegator(ctx, msg.FromStaker, msg.Creator); msg.Amount > delegationAmount { + return nil, types.ErrNotEnoughDelegation.Wrapf("%d > %d", msg.Amount, delegationAmount) + } + + // Only errors if all spells are currently on cooldown + if err := k.consumeRedelegationSpell(ctx, msg.Creator); err != nil { + return nil, err + } + + // The redelegation is translated into an undelegation from the old staker ... + if actualAmount := k.performUndelegation(ctx, msg.FromStaker, msg.Creator, msg.Amount); actualAmount != msg.Amount { + return nil, types.ErrNotEnoughDelegation.Wrapf("%d != %d", msg.Amount, actualAmount) + } + // ... and a new delegation to the new staker + k.performDelegation(ctx, msg.ToStaker, msg.Creator, msg.Amount) + + // Emit a delegation event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventRedelegate{ + Address: msg.Creator, + FromStaker: msg.FromStaker, + ToStaker: msg.ToStaker, + Amount: msg.Amount, + }) + + return &types.MsgRedelegateResponse{}, nil +} diff --git a/x/delegation/keeper/msg_server_redelegate_test.go b/x/delegation/keeper/msg_server_redelegate_test.go new file mode 100644 index 00000000..b56b7f0c --- /dev/null +++ b/x/delegation/keeper/msg_server_redelegate_test.go @@ -0,0 +1,336 @@ +package keeper_test + +import ( + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/delegation/types" +) + +/* + +TEST CASES - msg_server_redelegate.go + +* Redelegate 1 KYVE to Bob +* Redelegate more than delegated +* Redelegate without delegation +* Redelegate to non-existent staker +* Exhaust all redelegation spells +* Expire redelegation spells + +*/ + +var _ = Describe("Delegation - Redelegation", Ordered, func() { + s := i.NewCleanChain() + + aliceSelfDelegation := 100 * i.KYVE + bobSelfDelegation := 100 * i.KYVE + + BeforeEach(func() { + s = i.NewCleanChain() + + CreateFundedPool(s) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "EnabledPool", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + Disabled: false, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.ALICE, + Amount: aliceSelfDelegation, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.BOB, + Amount: bobSelfDelegation, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.BOB, + PoolId: 1, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + _, stakerFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.ALICE) + Expect(stakerFound).To(BeTrue()) + + _, stakerFound = s.App().StakersKeeper.GetStaker(s.Ctx(), i.BOB) + Expect(stakerFound).To(BeTrue()) + + s.CommitAfterSeconds(7) + }) + + AfterEach(func() { + CheckAndContinueChainForOneMonth(s) + }) + + It("Redelegate 1 KYVE to Bob", func() { + // Arrange + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + 
Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + + // Act + s.RunTxDelegatorSuccess(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.BOB, + Amount: 1 * i.KYVE, + }) + s.CommitAfterSeconds(10) + + // Assert + CheckAndContinueChainForOneMonth(s) + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter + 1*i.KYVE)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationBefore).To(Equal(bobDelegationAfter - 1*i.KYVE)) + }) + + It("Redelegate more than delegated", func() { + // Arrange + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + s.PerformValidityChecks() + + // Act + s.RunTxDelegatorError(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.BOB, + Amount: 11 * i.KYVE, + }) + s.CommitAfterSeconds(10) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationBefore).To(Equal(bobDelegationAfter)) + }) + + It("Redelegate without delegation", func() { + // Arrange + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation)) + Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + s.PerformValidityChecks() + + // Act + s.RunTxDelegatorError(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.CHARLIE, + Amount: 1 * i.KYVE, + }) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationBefore).To(Equal(bobDelegationAfter)) + }) + + It("Redelegate to non-existent staker", func() { + // Arrange + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + s.PerformValidityChecks() + + // Act + s.RunTxDelegatorError(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.CHARLIE, + Amount: 1 * i.KYVE, + }) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationBefore).To(Equal(bobDelegationAfter)) + }) + + 
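The next cases exercise the redelegation-spell accounting that consumeRedelegationSpell enforces: a fixed number of redelegations per cooldown window, and at most one per block. The following is a minimal, dependency-free sketch of that rule for orientation only (editor's illustration; the helper name and parameters are assumptions, not part of the patch):

// hasFreeRedelegationSpell reports whether an address may redelegate now,
// given the creation times (oldest first) of its existing cooldown entries.
func hasFreeRedelegationSpell(creationDates []uint64, now, cooldown, maxSpells uint64) bool {
	// Count spells that are still cooling down.
	active := uint64(0)
	for _, created := range creationDates {
		if created+cooldown > now {
			active++
		}
	}
	if active >= maxSpells {
		return false // every spell is on cooldown
	}
	// At most one redelegation per address and block.
	if len(creationDates) > 0 && creationDates[len(creationDates)-1] == now {
		return false
	}
	return true
}

With the default parameters used in these tests, five consecutive redelegations succeed before the spells are exhausted; the "Expire redelegation spells" case below then shows them becoming usable again one by one as their cooldown entries age out.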
It("Try to redelegate to inactive-staker staker", func() { + // Arrange + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.CHARLIE, + Amount: 100 * i.KYVE, + }) + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + charlieDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.CHARLIE) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(charlieDelegationBefore).To(Equal(100 * i.KYVE)) + s.PerformValidityChecks() + + // Act + s.RunTxDelegatorError(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.CHARLIE, + Amount: 1 * i.KYVE, + }) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationBefore).To(Equal(aliceDelegationAfter)) + + Expect(charlieDelegationBefore).To(Equal(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.CHARLIE))) + }) + + It("Exhaust all redelegation spells", func() { + // Arrange + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + s.PerformValidityChecks() + + // Act + redelegationMessage := types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.BOB, + Amount: 1 * i.KYVE, + } + + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationAfter).To(Equal(aliceSelfDelegation + 5*i.KYVE)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationAfter).To(Equal(bobSelfDelegation + 5*i.KYVE)) + + // Expect to fail. 
+ // Now all redelegation spells are exhausted + s.RunTxDelegatorError(&redelegationMessage) + }) + + It("Expire redelegation spells", func() { + // Arrange + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + aliceDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + bobDelegationBefore := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(aliceDelegationBefore).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(bobDelegationBefore).To(Equal(bobSelfDelegation)) + + redelegationMessage := types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.BOB, + Amount: 1 * i.KYVE, + } + + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(10) + s.PerformValidityChecks() + + // Act + s.CommitAfterSeconds(s.App().DelegationKeeper.GetRedelegationCooldown(s.Ctx()) - 50) + s.CommitAfterSeconds(1) + + // One redelegation available + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(1) + + // Redelegations are now all used again + s.RunTxDelegatorError(&redelegationMessage) + s.PerformValidityChecks() + + // Act 2 + + // Expire next two spells + s.CommitAfterSeconds(25) + s.RunTxDelegatorSuccess(&redelegationMessage) + // No two delegation within same block + s.RunTxDelegatorError(&redelegationMessage) + s.CommitAfterSeconds(1) + s.RunTxDelegatorSuccess(&redelegationMessage) + s.CommitAfterSeconds(1) + + // Assert + aliceDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE) + Expect(aliceDelegationAfter).To(Equal(aliceSelfDelegation + 2*i.KYVE)) + + bobDelegationAfter := s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB) + Expect(bobDelegationAfter).To(Equal(bobSelfDelegation + 8*i.KYVE)) + + // Expect to fail. + // Now all redelegation spells are exhausted + s.RunTxDelegatorError(&redelegationMessage) + }) +}) diff --git a/x/delegation/keeper/msg_server_undelegate.go b/x/delegation/keeper/msg_server_undelegate.go new file mode 100644 index 00000000..d9e1478d --- /dev/null +++ b/x/delegation/keeper/msg_server_undelegate.go @@ -0,0 +1,26 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Undelegate handles the transaction of undelegating a given amount from the delegated tokens +// The Undelegation is not performed immediately, instead an unbonding entry is created and pushed +// to a queue. When the unbonding timeout is reached the actual undelegation is performed. +// If the delegator got slashed during the unbonding only the remaining tokens will be returned. +func (k msgServer) Undelegate(goCtx context.Context, msg *types.MsgUndelegate) (*types.MsgUndelegateResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Do not allow to undelegate more than currently delegated + if delegationAmount := k.GetDelegationAmountOfDelegator(ctx, msg.Staker, msg.Creator); msg.Amount > delegationAmount { + return nil, types.ErrNotEnoughDelegation.Wrapf("%d > %d", msg.Amount, delegationAmount) + } + + // Create and insert unbonding queue entry. 
+ k.StartUnbondingDelegator(ctx, msg.Staker, msg.Creator, msg.Amount) + + return &types.MsgUndelegateResponse{}, nil +} diff --git a/x/delegation/keeper/msg_server_undelegate_test.go b/x/delegation/keeper/msg_server_undelegate_test.go new file mode 100644 index 00000000..93cabec4 --- /dev/null +++ b/x/delegation/keeper/msg_server_undelegate_test.go @@ -0,0 +1,581 @@ +package keeper_test + +import ( + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/delegation/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_undelegate.go + +* Undelegate more $KYVE than allowed +* Start undelegation; Check unbonding queue state +* Start undelegation and await unbonding +* Redelegation during undelegation unbonding +* Undelegate Slashed Amount +* Delegate twice and undelegate twice +* Delegate twice and undelegate twice and await unbonding +* Undelegate all after rewards and slashing +* JoinA, Slash, JoinB, PayoutReward +* Slash twice +* Start unbonding, slash twice, payout, await undelegation + +TODO(@max): joinA slash joinB slash -> remaining delegation + +*/ + +var _ = Describe("msg_server_undelegate.go", Ordered, func() { + s := i.NewCleanChain() + + const aliceSelfDelegation = 100 * i.KYVE + const bobSelfDelegation = 100 * i.KYVE + + BeforeEach(func() { + s = i.NewCleanChain() + + CreateFundedPool(s) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.ALICE, + Amount: aliceSelfDelegation, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.BOB, + Amount: bobSelfDelegation, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "DisabledPool", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + Disabled: true, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.BOB, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + _, aliceFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.ALICE) + Expect(aliceFound).To(BeTrue()) + + _, bobFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.BOB) + Expect(bobFound).To(BeTrue()) + + s.CommitAfterSeconds(7) + }) + + AfterEach(func() { + CheckAndContinueChainForOneMonth(s) + }) + + It("Undelegate more $KYVE than allowed", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorError(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 11 * i.KYVE, + }) + + // ASSERT + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(s.Ctx(), i.DUMMY[0])).To(BeEmpty()) + }) + + It("Start undelegation; Check unbonding queue state", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + 
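// Editor's note: MsgUndelegate only enqueues an entry via
// StartUnbondingDelegator; delegation amounts and balances stay untouched
// until ProcessDelegatorUnbondingQueue pays the entry out once
// CreationTime + UnbondingDelegationTime has elapsed. This test submits the
// undelegation below and then asserts exactly that intermediate state.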
Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 5 * i.KYVE, + }) + + s.CommitAfterSeconds(1) + + // ASSERT + + // Delegation amount stays the same (due to unbonding) + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + unbondingEntries := s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(s.Ctx(), i.DUMMY[0]) + Expect(unbondingEntries).To(HaveLen(1)) + Expect(unbondingEntries[0].Staker).To(Equal(i.ALICE)) + Expect(unbondingEntries[0].Delegator).To(Equal(i.DUMMY[0])) + Expect(unbondingEntries[0].Amount).To(Equal(5 * i.KYVE)) + Expect(unbondingEntries[0].CreationTime).To(Equal(uint64(s.Ctx().BlockTime().Unix() - 1))) + }) + + It("Start undelegation and await unbonding", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 5 * i.KYVE, + }) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(995 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 5*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(5 * i.KYVE)) + + unbondingEntries := s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(s.Ctx(), i.DUMMY[0]) + Expect(unbondingEntries).To(BeEmpty()) + }) + + It("Redelegation during undelegation unbonding", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 5 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgRedelegate{ + Creator: i.DUMMY[0], + FromStaker: i.ALICE, + ToStaker: i.BOB, + Amount: 10 * i.KYVE, + }) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + + // Unbonding should have had no effect + 
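// Editor's note: the redelegation already moved the full 10 $KYVE to BOB,
// so when the queued entry matures performUndelegation finds nothing left
// with ALICE and returns 0; no funds are paid out and the balance stays at
// 990 $KYVE.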
Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(0 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.BOB, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + unbondingEntries := s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(s.Ctx(), i.DUMMY[0]) + Expect(unbondingEntries).To(BeEmpty()) + }) + + It("Undelegate Slashed Amount", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + params.UploadSlash = "0.1" + s.App().DelegationKeeper.SetParams(s.Ctx(), params) + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(9 * i.KYVE)) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(999 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(0 * i.KYVE)) + }) + + It("Delegate twice and undelegate twice", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.PerformValidityChecks() + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(980 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 30*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(20 * i.KYVE)) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 5 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 8 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + // ASSERT + unbondingEntries := s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntries(s.Ctx()) + + Expect(unbondingEntries).To(HaveLen(2)) + Expect(unbondingEntries[0].Staker).To(Equal(i.ALICE)) + Expect(unbondingEntries[0].Delegator).To(Equal(i.DUMMY[0])) + Expect(unbondingEntries[0].Amount).To(Equal(5 * i.KYVE)) + 
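// Editor's note: CreationTime is the block time at submission; the
// CommitAfterSeconds(10) calls executed since then account for the -20 and
// -10 second offsets asserted here.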
Expect(unbondingEntries[0].CreationTime).To(Equal(uint64(s.Ctx().BlockTime().Unix() - 20))) + + Expect(unbondingEntries[1].Staker).To(Equal(i.ALICE)) + Expect(unbondingEntries[1].Delegator).To(Equal(i.DUMMY[1])) + Expect(unbondingEntries[1].Amount).To(Equal(8 * i.KYVE)) + Expect(unbondingEntries[1].CreationTime).To(Equal(uint64(s.Ctx().BlockTime().Unix() - 10))) + }) + + It("Delegate twice and undelegate twice and await unbonding", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(980 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 30*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(20 * i.KYVE)) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 5 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 8 * i.KYVE, + }) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + unbondingEntries := s.App().DelegationKeeper.GetAllUnbondingDelegationQueueEntries(s.Ctx()) + Expect(unbondingEntries).To(BeEmpty()) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(995 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(5 * i.KYVE)) + + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(988 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(12 * i.KYVE)) + }) + + It("Undelegate all after rewards and slashing", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 10*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(10 * i.KYVE)) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(980 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 30*i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(20 * i.KYVE)) + + s.PerformValidityChecks() + + // Payout rewards + // Alice: 100 100/130 * 10 * 1e9 = 7_692_307_692 + // Dummy0: 10 10/130 * 10 * 1e9 = 769_230_769 + // Dummy1: 20 20/130 * 10 * 1e9 = 1_538_461_538 + PayoutRewards(s, i.ALICE, 10*i.KYVE) + + // Collect + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + 
Creator: i.DUMMY[1], + Staker: i.ALICE, + }) + + // Slash 10% + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + params.UploadSlash = "0.1" + s.App().DelegationKeeper.SetParams(s.Ctx(), params) + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(9 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(18 * i.KYVE)) + + // ACT + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 9 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 18 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(999*i.KYVE + uint64(769_230_769))) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(998*i.KYVE + uint64(1_538_461_538))) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(uint64(float64(aliceSelfDelegation) * 0.9))) + + delegationEntries := s.App().DelegationKeeper.GetAllDelegationEntries(s.Ctx()) + delegators := s.App().DelegationKeeper.GetAllDelegators(s.Ctx()) + slashes := s.App().DelegationKeeper.GetAllDelegationSlashEntries(s.Ctx()) + + Expect(len(slashes)).To(Equal(1)) + Expect(len(delegationEntries)).To(Equal(4)) + Expect(delegators).To(HaveLen(2)) + }) + + It("JoinA, Slash, JoinB, PayoutReward", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + params.UploadSlash = "0.5" + s.App().DelegationKeeper.SetParams(s.Ctx(), params) + s.PerformValidityChecks() + + // Slash 50% + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + // Dummy0: 5$KYVE Dummy1: 20$KYVE + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal((50 + 25) * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(5 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(20 * i.KYVE)) + + // ACT + + // Alice: 50 50 / 75 * 10 * 1e9 = 6_666_666_666 + // Dummy0: 5 5 / 75 * 10 * 1e9 = 666_666_666 + // Dummy1: 20 20 / 75 * 10 * 1e9 = 2_666_666_666 + PayoutRewards(s, i.ALICE, 10*i.KYVE) + + // ASSERT + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(666_666_666))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(2_666_666_666))) + }) + + It("Slash twice", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.PerformValidityChecks() + + // ACT + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + params.UploadSlash = "0.5" + s.App().DelegationKeeper.SetParams(s.Ctx(), params) + + // Slash 50% twice + 
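// Editor's note: slashes compound on the remaining stake, so two 50%
// slashes leave 25% of the original delegation (2.5 and 5 $KYVE below, and
// 25 $KYVE of ALICE's self-delegation), not zero.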
s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + + // ASSERT + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(25*i.KYVE + uint64(2_500_000_000+5_000_000_000))) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(2_500_000_000))) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(5_000_000_000))) + }) + + It("Start unbonding, slash twice, payout, await undelegation", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.CommitAfterSeconds(10) + + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgUndelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 20 * i.KYVE, + }) + + s.PerformValidityChecks() + + // ACT + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + params.UploadSlash = "0.5" + s.App().DelegationKeeper.SetParams(s.Ctx(), params) + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + s.App().DelegationKeeper.SlashDelegators(s.Ctx(), 0, i.ALICE, types.SLASH_TYPE_UPLOAD) + + // Alice: 25 25 / 32.5 * 1e10 = 7_692_307_692 + // Dummy0: 2.5 2.5 / 32.5 * 1e10 = 769_230_769 + // Dummy1: 5 5 / 32.5 * 1e10 = 1_538_461_538 + PayoutRewards(s, i.ALICE, 10*i.KYVE) + + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx()) + 1) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(25 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[0])).To(BeZero()) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.ALICE, i.DUMMY[1])).To(BeZero()) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(uint64(1000e9 - 7_500_000_000 + 769_230_769))) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(uint64(1000e9 - 15_000_000_000 + 1_538_461_538))) + }) +}) diff --git a/x/delegation/keeper/msg_server_update_params.go b/x/delegation/keeper/msg_server_update_params.go new file mode 100644 index 00000000..b77b5220 --- /dev/null +++ b/x/delegation/keeper/msg_server_update_params.go @@ -0,0 +1,34 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + + // Delegation + "github.com/KYVENetwork/chain/x/delegation/types" + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +// UpdateParams is a governance message to update module-wide parameters. +// req.payload is a valid json string +// This is already checked and validated by the `types/params.go` +// Only the provided properties will be updated, the rest remains the same. 
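// For example (editor's illustration), a payload of {"upload_slash": "0.05"}
// changes only UploadSlash: the payload is unmarshalled over a copy of the
// current params, so absent fields keep their existing values.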
+func (k msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + params := k.GetParams(ctx) + + payload := params + _ = json.Unmarshal([]byte(req.Payload), &payload) + k.SetParams(ctx, payload) + + return &types.MsgUpdateParamsResponse{}, nil +} diff --git a/x/delegation/keeper/msg_server_update_params_test.go b/x/delegation/keeper/msg_server_update_params_test.go new file mode 100644 index 00000000..caa8990c --- /dev/null +++ b/x/delegation/keeper/msg_server_update_params_test.go @@ -0,0 +1,676 @@ +package keeper_test + +import ( + "fmt" + + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Delegation + "github.com/KYVENetwork/chain/x/delegation/types" + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" +) + +/* + +TEST CASES - msg_server_update_params.go + +* Check default params +* Invalid authority (transaction) +* Invalid authority (proposal) +* Update every param at once +* Update no param +* Update with invalid formatted payload + +* Update unbonding delegation time +* Update unbonding delegation time with invalid value + +* Update redelegation cooldown +* Update redelegation cooldown with invalid value + +* Update redelegation max amount +* Update redelegation max amount with invalid value + +* Update vote slash +* Update vote slash with invalid value + +* Update upload slash +* Update upload slash with invalid value + +* Update timeout slash +* Update timeout slash with invalid value + +*/ + +var _ = Describe("msg_server_update_params.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + + minDeposit := s.App().GovKeeper.GetDepositParams(s.Ctx()).MinDeposit + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter := sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + + BeforeEach(func() { + s = i.NewCleanChain() + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter = sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Check default params", func() { + // ASSERT + params := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(params.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(params.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(params.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(params.VoteSlash).To(Equal(types.DefaultVoteSlash)) + Expect(params.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(params.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, 
i.DUMMY[0], "", + ) + + // ACT + _, err := s.RunTx(proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Update every param at once", func() { + // ARRANGE + payload := `{ + "unbonding_delegation_time": 3600, + "redelegation_cooldown": 3600, + "redelegation_max_amount": 1, + "vote_slash": "0.05", + "upload_slash": "0.05", + "timeout_slash": "0.05" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(uint64(3600))) + Expect(updatedParams.RedelegationCooldown).To(Equal(uint64(3600))) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(uint64(1))) + Expect(updatedParams.VoteSlash).To(Equal("0.05")) + Expect(updatedParams.UploadSlash).To(Equal("0.05")) + Expect(updatedParams.TimeoutSlash).To(Equal("0.05")) + }) + + It("Update no param", func() { + // ARRANGE + payload := `{}` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + }) + + It("Update with invalid formatted payload", func() { + // ARRANGE + payload := `{ + "unbonding_delegation_time": 3600, + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + }) + + It("Update unbonding delegation time", func() { + // ARRANGE + payload := `{ + "unbonding_delegation_time": 
3600 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(uint64(3600))) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update unbonding delegation time with invalid value", func() { + // ARRANGE + payload := `{ + "unbonding_delegation_time": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update redelegation cooldown", func() { + // ARRANGE + payload := `{ + "redelegation_cooldown": 3600 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationCooldown).To(Equal(uint64(3600))) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update redelegation cooldown with invalid value", func() { + // ARRANGE + payload := `{ + "redelegation_cooldown": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + 
_, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update Update redelegation max amount", func() { + // ARRANGE + payload := `{ + "redelegation_max_amount": 1 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(uint64(1))) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update Update redelegation max amount with invalid value", func() { + // ARRANGE + payload := `{ + "redelegation_max_amount": -2 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update vote slash", func() { + // ARRANGE + payload := `{ + "vote_slash": "0.05" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + 
Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal("0.05")) + }) + + It("Update vote slash with invalid value", func() { + // ARRANGE + payload := `{ + "vote_slash": "invalid" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + fmt.Println(msg.ValidateBasic()) + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update upload slash", func() { + // ARRANGE + payload := `{ + "upload_slash": "0.05" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal("0.05")) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update upload slash with invalid value", func() { + // ARRANGE + payload := `{ + "upload_slash": "1.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + 
Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update timeout slash", func() { + // ARRANGE + payload := `{ + "timeout_slash": "0.05" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal("0.05")) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) + + It("Update timeout slash with invalid value", func() { + // ARRANGE + payload := `{ + "upload_slash": "-0.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().DelegationKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.UnbondingDelegationTime).To(Equal(types.DefaultUnbondingDelegationTime)) + Expect(updatedParams.RedelegationCooldown).To(Equal(types.DefaultRedelegationCooldown)) + Expect(updatedParams.RedelegationMaxAmount).To(Equal(types.DefaultRedelegationMaxAmount)) + Expect(updatedParams.UploadSlash).To(Equal(types.DefaultUploadSlash)) + Expect(updatedParams.TimeoutSlash).To(Equal(types.DefaultTimeoutSlash)) + Expect(updatedParams.VoteSlash).To(Equal(types.DefaultVoteSlash)) + }) +}) diff --git a/x/delegation/keeper/msg_server_withdraw_rewards.go b/x/delegation/keeper/msg_server_withdraw_rewards.go new file mode 100644 index 00000000..b23812c9 --- /dev/null +++ b/x/delegation/keeper/msg_server_withdraw_rewards.go @@ -0,0 +1,39 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + + sdkErrors "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// WithdrawRewards calculates the current rewards of a delegator and transfers the balance to +// the delegator's wallet. Only the delegator himself can call this transaction. +func (k msgServer) WithdrawRewards(goCtx context.Context, msg *types.MsgWithdrawRewards) (*types.MsgWithdrawRewardsResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Check if the sender has delegated to the given staker + if !k.DoesDelegatorExist(ctx, msg.Staker, msg.Creator) { + return nil, sdkErrors.WithType(types.ErrNotADelegator, msg.Creator) + } + + // Withdraw all rewards of the sender. + reward := k.f1WithdrawRewards(ctx, msg.Staker, msg.Creator) + + // Transfer reward $KYVE from this module to sender. 
+ if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, msg.Creator, reward); err != nil { + return nil, err + } + + // Emit a delegation event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventWithdrawRewards{ + Address: msg.Creator, + Staker: msg.Staker, + Amount: reward, + }) + + return &types.MsgWithdrawRewardsResponse{}, nil +} diff --git a/x/delegation/keeper/msg_server_withdraw_rewards_test.go b/x/delegation/keeper/msg_server_withdraw_rewards_test.go new file mode 100644 index 00000000..fc062d4a --- /dev/null +++ b/x/delegation/keeper/msg_server_withdraw_rewards_test.go @@ -0,0 +1,166 @@ +package keeper_test + +import ( + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/delegation/types" +) + +/* + +TEST CASES - msg_server_withdraw_rewards.go + +* Payout rewards which cause rounding issues and withdraw +* Withdraw from a non-existing delegator +* Test invalid payouts to delegators + +* TODO(@max): Test withdrawal of rewards which are zero +* TODO(@max): Test withdrawal of rewards with multiple slashes + +*/ + +var _ = Describe("msg_server_withdraw_rewards.go", Ordered, func() { + s := i.NewCleanChain() + + const aliceSelfDelegation = 0 * i.KYVE + const bobSelfDelegation = 0 * i.KYVE + + BeforeEach(func() { + s = i.NewCleanChain() + + CreateFundedPool(s) + + // Stake + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.ALICE, + Amount: aliceSelfDelegation, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.BOB, + Amount: bobSelfDelegation, + }) + + _, stakerFound := s.App().StakersKeeper.GetStaker(s.Ctx(), i.ALICE) + Expect(stakerFound).To(BeTrue()) + + _, stakerFound = s.App().StakersKeeper.GetStaker(s.Ctx(), i.BOB) + Expect(stakerFound).To(BeTrue()) + }) + + AfterEach(func() { + CheckAndContinueChainForOneMonth(s) + }) + + It("Payout rewards which cause rounding issues and withdraw", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&types.MsgDelegate{ + Creator: i.DUMMY[2], + Staker: i.ALICE, + Amount: 10 * i.KYVE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(990 * i.KYVE)) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(990 * i.KYVE)) + Expect(s.GetBalanceFromAddress(i.DUMMY[2])).To(Equal(990 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.ALICE)).To(Equal(aliceSelfDelegation + 30*i.KYVE)) + + delegationModuleBalanceBefore := s.GetBalanceFromModule(types.ModuleName) + poolModuleBalanceBefore := s.GetBalanceFromModule(pooltypes.ModuleName) + s.PerformValidityChecks() + + // ACT + + // Alice: 100 + // Dummy0: 10 + // Dummy1: 0 + PayoutRewards(s, i.ALICE, 20*i.KYVE) + + // ASSERT + delegationModuleBalanceAfter := s.GetBalanceFromModule(types.ModuleName) + poolModuleBalanceAfter := s.GetBalanceFromModule(pooltypes.ModuleName) + + Expect(delegationModuleBalanceAfter).To(Equal(delegationModuleBalanceBefore + 20*i.KYVE)) + Expect(poolModuleBalanceAfter).To(Equal(poolModuleBalanceBefore - 20*i.KYVE)) + + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[0])).To(Equal(uint64(6666666666))) + 
Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[1])).To(Equal(uint64(6666666666))) + Expect(s.App().DelegationKeeper.GetOutstandingRewards(s.Ctx(), i.ALICE, i.DUMMY[2])).To(Equal(uint64(6666666666))) + + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[0], + Staker: i.ALICE, + }) + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[1], + Staker: i.ALICE, + }) + s.RunTxDelegatorSuccess(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[2], + Staker: i.ALICE, + }) + + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(uint64(996666666666))) + Expect(s.GetBalanceFromAddress(i.DUMMY[1])).To(Equal(uint64(996666666666))) + Expect(s.GetBalanceFromAddress(i.DUMMY[2])).To(Equal(uint64(996666666666))) + + Expect(s.GetBalanceFromModule(types.ModuleName)).To(Equal(uint64(30000000002))) + }) + + It("Withdraw from a non-existing delegator", func() { + // ARRANGE + balanceDummy1Before := s.GetBalanceFromAddress(i.DUMMY[0]) + balanceCharlieBefore := s.GetBalanceFromAddress(i.CHARLIE) + balanceAliceBefore := s.GetBalanceFromAddress(i.ALICE) + delegationBalance := s.GetBalanceFromModule(types.ModuleName) + + s.PerformValidityChecks() + + // ACT + s.RunTxDelegatorError(&types.MsgWithdrawRewards{ + Creator: i.CHARLIE, + Staker: i.ALICE, + }) + + s.RunTxDelegatorError(&types.MsgWithdrawRewards{ + Creator: i.DUMMY[0], + Staker: i.CHARLIE, + }) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.DUMMY[0])).To(Equal(balanceDummy1Before)) + Expect(s.GetBalanceFromAddress(i.CHARLIE)).To(Equal(balanceCharlieBefore)) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(balanceAliceBefore)) + Expect(s.GetBalanceFromModule(types.ModuleName)).To(Equal(delegationBalance)) + }) + + It("Test invalid payouts to delegators", func() { + // ARRANGE + forkedCtx, _ := s.Ctx().CacheContext() + + // ACT + success1 := s.App().DelegationKeeper.PayoutRewards(forkedCtx, i.ALICE, 20000*i.KYVE, pooltypes.ModuleName) + success2 := s.App().DelegationKeeper.PayoutRewards(s.Ctx(), i.DUMMY[20], 0*i.KYVE, pooltypes.ModuleName) + + // ASSERT + Expect(success1).To(BeFalse()) + Expect(success2).To(BeFalse()) + }) +}) diff --git a/x/delegation/module.go b/x/delegation/module.go new file mode 100644 index 00000000..d9a9be8e --- /dev/null +++ b/x/delegation/module.go @@ -0,0 +1,161 @@ +package delegation + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/KYVENetwork/chain/x/delegation/client/cli" + "github.com/KYVENetwork/chain/x/delegation/keeper" + "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. 
+type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. 
To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.InitMemStore(ctx) +} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + am.keeper.ProcessDelegatorUnbondingQueue(ctx) + return []abci.ValidatorUpdate{} +} diff --git a/x/delegation/spec/01_concepts.md b/x/delegation/spec/01_concepts.md new file mode 100644 index 00000000..061e6caa --- /dev/null +++ b/x/delegation/spec/01_concepts.md @@ -0,0 +1,45 @@ + + +# Concepts + +The KYVE Protocol layer implements staking and delegation similar to +Tendermint. Validators that want to participate in the protocol need to stake +$KYVE to have voting power and join different KYVE storage pools. + +## Code Structure + +This module adheres to our global coding structure, defined [here](../../../CodeStructure.md). + +## Delegation + +Users who want to support a validator can delegate their $KYVE. This validator +is often referred to in our codebase as a `staker`. + +If a validator delegates to itself, this is called self-delegation. From a +technical point of view, self-delegations and user delegations are treated the same. + +## F1 Distribution + +Because there is no limit to the number of validators, a direct payout of each +reward would cost an outrageous amount of gas. We have turned to the +"F1 Fee Distribution" algorithm to solve this issue. It handles the delegation +itself, payouts of rewards, and slashing events. + +The main idea is that as long as the delegation distribution does not change (in +other words, there are no new delegations or undelegations), there is no need to pay out +rewards. When users withdraw their rewards or update their delegation amount, +their rewards are calculated from the accumulated period entries and correctly distributed. + +## Rewards and Slashes + +Users who delegate tokens to a validator will receive a portion of its rewards. +These rewards are generated when the validator produces valid data bundles in a +KYVE storage pool. On the other hand, these delegations are also subject to +slashing events if the validator misbehaves. + +# References + +[1] D. Ojha, C. Goes. F1 Fee Distribution. +In *International Conference on Blockchain Economics, Security and Protocols*, *pages 10:1-10:6*, 2019. URL: `https://d-nb.info/1208239872/34` diff --git a/x/delegation/spec/02_state.md b/x/delegation/spec/02_state.md new file mode 100644 index 00000000..0f2164d8 --- /dev/null +++ b/x/delegation/spec/02_state.md @@ -0,0 +1,145 @@ + + +# State + +The module is mainly responsible for handling the f1-distribution state. +Furthermore, it is also responsible for unbonding and redelegation. + +## F1-Distribution +The state is split across four proto types which all have their own +prefix in the KV-Store. + +### DelegationData +A DelegationData entry exists for every staker. It primarily stores the +total delegation and the rewards of the current period, and keeps track +of the f1-index. It exists as long as the staker has at least `1ukyve` delegation.
+ +- DelegationData: `0x03 | StakerAddr -> ProtocolBuffer(stakerDelegationData)` + +```go +type DelegationData struct { + Staker string + // F1 + CurrentRewards uint64 + TotalDelegation uint64 + LatestIndexK uint64 + // delegator_count the amount of different addresses delegating to the staker + DelegatorCount uint64 + // latest_index_was_undelegation helps indicate when an entry can be deleted + LatestIndexWasUndelegation bool +} +``` + +### Delegator +Delegator represents a pair of (staker, delegator) and the corresponding f1-index. + +- Delegator: `0x01 | 0x00 | StakerAddr | DelegatorAddr -> ProtocolBuffer(delegator)` + +One additional index is maintained to query for all stakers a delegator has delegated to: + +- DelegatorIndex2: `0x01 | 0x01 | DelegatorAddr | StakerAddr -> ProtocolBuffer(delegator)` + +```go +type Delegator struct { + Staker string + Delegator string + KIndex uint64 + InitialAmount uint64 +} +``` + +### DelegationEntry +DelegationEntries are used internally by the f1-distribution. +They mark the beginning of every period. + +- DelegationEntry: `0x02 | StakerAddr | kIndex -> ProtocolBuffer(delegationEntry)` + +```go +type DelegationEntry struct { + Staker string + KIndex uint64 + Value sdk.Dec +} +``` + +### DelegationSlash +DelegationSlash represents an internal f1-slash. +It is needed to calculate the actual amount of stake +after a slash has occurred. + +- DelegationSlash: `0x04 | StakerAddr | kIndex -> ProtocolBuffer(delegationSlash)` + +```go +type DelegationSlash struct { + Staker string + KIndex uint64 + Fraction sdk.Dec +} +``` + +## Unbonding Queue + +### QueueState +For the unbonding queue the app needs to keep track of the head (HighIndex) and +tail (LowIndex) of the queue. New entries are appended to the +head. The EndBlocker checks the tail for entries that are due and processes them. + +- QueueState: `0x05 -> ProtocolBuffer(queueState)` + +```go +type QueueState struct { + LowIndex uint64 + HighIndex uint64 +} +``` + +### UndelegationQueueEntry +Every time a user starts an undelegation, an entry is created +and appended to the head of the queue, i.e. the current HighIndex is +incremented and assigned to the entry. +The order of the queue is automatically provided by the KV-Store. + +- UndelegationQueueEntry: `0x06 | 0x00 | Index -> ProtocolBuffer(undelegationQueueEntry)` + +A second index is provided so that users can query their own pending entries +without iterating the entire queue. + +- UndelegationQueueEntryIndex2: `0x06 | 0x01 | StakerAddr | Index -> ProtocolBuffer(undelegationQueueEntry)` + + +```go +type UndelegationQueueEntry struct { + Index uint64 + Staker string + Delegator string + Amount uint64 + CreationTime uint64 +} +``` + + +## Redelegation Spells + +Redelegation spells do not require a queue for tracking expired +spells, as they are checked on demand when the user tries to +redelegate. + +### RedelegationCooldown + +Every used redelegation spell is stored in the KV-Store with its creation time. +Once the oldest entry is older than `RedelegationCooldown`, it can be reused. +To avoid keeping track of a global counter, we use the block height to generate +a unique key for the KV-Store. +Therefore, it is only possible to perform one redelegation per block.
+ +- RedelegationCooldown: `0x07 | DelegatorAddr | blockHeight -> ProtocolBuffer(redelegationCooldown)` + +```go +type RedelegationCooldown struct { + Address string + CreationDate uint64 +} +``` + diff --git a/x/delegation/spec/03_messages.md b/x/delegation/spec/03_messages.md new file mode 100644 index 00000000..416d7556 --- /dev/null +++ b/x/delegation/spec/03_messages.md @@ -0,0 +1,43 @@ + + +# Messages + +## `MsgDelegate` + +Using this message, a user can delegate a specified amount to a KYVE protocol +validator. The chosen validator must exist in the `x/stakers` module. +Otherwise, the transaction will fail. If the user previously delegated to this +validator, any pending rewards will be withdrawn immediately. + +Delegated $KYVE tokens are locked for `UnbondingDelegationTime` seconds. This +is the minimum time users need to wait before they can use their tokens again. + +## `MsgWithdrawRewards` + +Rewards cannot be distributed to delegators immediately, because paying out +every delegator directly would cost too much gas. Therefore, all rewards are collected in a pool, and +delegators can use this message to withdraw their pending rewards. + +## `MsgUndelegate` + +This message starts the undelegation process by creating a new entry in the +unbonding queue. Nothing else happens immediately: users still receive +rewards and are still subject to slashing. After `UnbondingDelegationTime` +seconds, the actual unbonding is performed via an end-block hook. + +After the unbonding time has passed, if the amount requested to undelegate is +higher than the actual amount (because of a slashing event), only the available +amount is returned to the user. + +## `MsgRedelegate` + +This message allows delegators to switch their delegation between different +KYVE protocol validators. It is only possible to redelegate to active +validators (this means they are participating in at least one storage pool). + +Every delegator has `RedelegationMaxAmount` redelegation spells. Once a spell is +cast, it goes on a cooldown for `RedelegationCooldown` seconds. If all +redelegation spells are used, the user must wait until the first spell is +available again. diff --git a/x/delegation/spec/04_end_block.md b/x/delegation/spec/04_end_block.md new file mode 100644 index 00000000..1f512a89 --- /dev/null +++ b/x/delegation/spec/04_end_block.md @@ -0,0 +1,14 @@ + + +# EndBlock + +The `x/delegation` module end-block hook handles the unbonding queue. After +`UnbondingDelegationTime` seconds have passed, delegators will receive the amount +of tokens they undelegated. However, if the validator they were delegating to +was slashed during this time, the received amount will be smaller. + +Please note that, unlike unbonding, redelegations are not tracked in a queue. Instead, +the remaining redelegation spells are calculated on demand during transaction +execution.
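+ +The following sketch illustrates how this queue processing could look. It is a +simplified illustration rather than the exact keeper implementation; helper +names such as `GetQueueState`, `GetUndelegationQueueEntry`, +`GetUnbondingDelegationTime` and `performUndelegation` stand in for the +corresponding keeper methods. + +```go +// Sketch: process every due entry at the tail (LowIndex) of the unbonding queue. +func (k Keeper) processUnbondingQueueSketch(ctx sdk.Context) { + state := k.GetQueueState(ctx) + + for i := state.LowIndex + 1; i <= state.HighIndex; i++ { + entry, found := k.GetUndelegationQueueEntry(ctx, i) + if !found { + // The entry was already removed; just advance the tail. + state.LowIndex = i + continue + } + + // The queue is ordered by creation time, so stop at the first entry + // that is not due yet. + if entry.CreationTime+k.GetUnbondingDelegationTime(ctx) > uint64(ctx.BlockTime().Unix()) { + break + } + + // Perform the undelegation; slashes may have reduced the amount that can be returned. + k.performUndelegation(ctx, entry.Staker, entry.Delegator, entry.Amount) + + k.RemoveUndelegationQueueEntry(ctx, &entry) + state.LowIndex = i + } + + k.SetQueueState(ctx, state) +} +```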
diff --git a/x/delegation/spec/05_events.md b/x/delegation/spec/05_events.md new file mode 100644 index 00000000..20b5b405 --- /dev/null +++ b/x/delegation/spec/05_events.md @@ -0,0 +1,42 @@ + + +# Events + +The `x/delegation` module emits the following events: + +## EndBlocker + +| Type | Attribute Key | Attribute Value | +|-------------------|---------------|--------------------| +| `EventUndelegate` | address | {delegatorAddress} | +| `EventUndelegate` | staker | {stakerAddress} | +| `EventUndelegate` | amount | {amount} | + +## Messages + +### `MsgDelegate` + +| Type | Attribute Key | Attribute Value | +|-----------------|---------------|--------------------| +| `EventDelegate` | address | {delegatorAddress} | +| `EventDelegate` | staker | {stakerAddress} | +| `EventDelegate` | amount | {amount} | + +### `MsgRedelegate` + +| Type | Attribute Key | Attribute Value | +|-------------------|---------------|---------------------| +| `EventRedelegate` | address | {delegatorAddress} | +| `EventRedelegate` | from_staker | {fromStakerAddress} | +| `EventRedelegate` | to_staker | {toStakerAddress} | +| `EventRedelegate` | amount | {amount} | + +### `MsgWithdrawRewards` + +| Type | Attribute Key | Attribute Value | +|------------------------|---------------|--------------------| +| `EventWithdrawRewards` | address | {delegatorAddress} | +| `EventWithdrawRewards` | staker | {stakerAddress} | +| `EventWithdrawRewards` | amount | {amount} | diff --git a/x/delegation/spec/06_params.md b/x/delegation/spec/06_params.md new file mode 100644 index 00000000..278b293a --- /dev/null +++ b/x/delegation/spec/06_params.md @@ -0,0 +1,13 @@ + + +# Parameters + +The `x/delegation` module relies on the following parameters: + +| Key | Type | Default Value | +|---------------------------|-----------------|---------------| +| `UnbondingDelegationTime` | uint64 (time s) | 432000 | +| `RedelegationCooldown` | uint64 (time s) | 432000 | +| `RedelegationMaxAmount` | uint64 | 5 | diff --git a/x/delegation/spec/07_exported.md b/x/delegation/spec/07_exported.md new file mode 100644 index 00000000..9772bfc9 --- /dev/null +++ b/x/delegation/spec/07_exported.md @@ -0,0 +1,41 @@ + + +# Exported + +The `x/delegation` module exports the following functions, which can be used +outside the module. These functions will not return an error, as everything is +handled internally. + +```go +type DelegationKeeper interface { + + // GetDelegationAmount returns the sum of all delegations for a specific staker. + // If the staker does not exist, it returns zero as the staker has zero delegations + GetDelegationAmount(ctx sdk.Context, staker string) uint64 + // GetDelegationAmountOfDelegator returns the amount of $KYVE that `delegatorAddress` + // has delegated to `stakerAddress`. If one of the addresses does not exist, it returns zero. + GetDelegationAmountOfDelegator(ctx sdk.Context, stakerAddress string, delegatorAddress string) uint64 + + // GetDelegationOfPool returns the total amount of $KYVE that users have delegated + // to stakers that are participating in the given pool + GetDelegationOfPool(ctx sdk.Context, poolId uint64) uint64 + + // PayoutRewards transfers `amount` $nKYVE from the `payerModuleName`-module to the delegation module. + // It then awards these tokens internally to all delegators of staker `staker`. + // Delegators can then receive these rewards if they call the `withdraw`-transaction. + // This method returns false if the payout fails.
This happens usually if there are no + // delegators for that staker. If this happens one should do something else with the rewards. + PayoutRewards(ctx sdk.Context, staker string, amount uint64, payerModuleName string) (success bool) + + // SlashDelegators reduces the delegation of all delegators of `staker` by fraction + // and transfers the amount to the Treasury. + SlashDelegators(ctx sdk.Context, poolId uint64, staker string, slashType stakertypes.SlashType) + + // GetOutstandingRewards calculates the current rewards a delegator has collected for + // the given staker. + GetOutstandingRewards(ctx sdk.Context, staker string, delegator string) uint64 + +} +``` diff --git a/x/delegation/types/codec.go b/x/delegation/types/codec.go new file mode 100644 index 00000000..17853d7a --- /dev/null +++ b/x/delegation/types/codec.go @@ -0,0 +1,22 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgDelegate{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgWithdrawRewards{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUndelegate{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgRedelegate{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateParams{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/delegation/types/delegation.pb.go b/x/delegation/types/delegation.pb.go new file mode 100644 index 00000000..610bbb00 --- /dev/null +++ b/x/delegation/types/delegation.pb.go @@ -0,0 +1,2121 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/delegation/v1beta1/delegation.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SlashType ... +type SlashType int32 + +const ( + // SLASH_TYPE_UNSPECIFIED ... + SLASH_TYPE_UNSPECIFIED SlashType = 0 + // SLASH_TYPE_TIMEOUT ... + SLASH_TYPE_TIMEOUT SlashType = 1 + // SLASH_TYPE_VOTE ... + SLASH_TYPE_VOTE SlashType = 2 + // SLASH_TYPE_UPLOAD ... 
+ SLASH_TYPE_UPLOAD SlashType = 3 +) + +var SlashType_name = map[int32]string{ + 0: "SLASH_TYPE_UNSPECIFIED", + 1: "SLASH_TYPE_TIMEOUT", + 2: "SLASH_TYPE_VOTE", + 3: "SLASH_TYPE_UPLOAD", +} + +var SlashType_value = map[string]int32{ + "SLASH_TYPE_UNSPECIFIED": 0, + "SLASH_TYPE_TIMEOUT": 1, + "SLASH_TYPE_VOTE": 2, + "SLASH_TYPE_UPLOAD": 3, +} + +func (x SlashType) String() string { + return proto.EnumName(SlashType_name, int32(x)) +} + +func (SlashType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{0} +} + +// Delegator stores the information that one address has delegated to another address +// It stores important information for the F1-Fee distribution algorithm +type Delegator struct { + // staker corresponds to a KYVE-staker on the protocol-side + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // delegator the user who delegate to the staker. + // If staker and delegator are the same we call it: self-delegation + Delegator string `protobuf:"bytes,2,opt,name=delegator,proto3" json:"delegator,omitempty"` + // k_index is an internal index for the f1-distribution algorithm + KIndex uint64 `protobuf:"varint,3,opt,name=k_index,json=kIndex,proto3" json:"k_index,omitempty"` + // initial_amount of stake the user had when it delegated. + // slashes can cause that the actual stake is lower. + InitialAmount uint64 `protobuf:"varint,4,opt,name=initial_amount,json=initialAmount,proto3" json:"initial_amount,omitempty"` +} + +func (m *Delegator) Reset() { *m = Delegator{} } +func (m *Delegator) String() string { return proto.CompactTextString(m) } +func (*Delegator) ProtoMessage() {} +func (*Delegator) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{0} +} +func (m *Delegator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Delegator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Delegator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Delegator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Delegator.Merge(m, src) +} +func (m *Delegator) XXX_Size() int { + return m.Size() +} +func (m *Delegator) XXX_DiscardUnknown() { + xxx_messageInfo_Delegator.DiscardUnknown(m) +} + +var xxx_messageInfo_Delegator proto.InternalMessageInfo + +func (m *Delegator) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *Delegator) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +func (m *Delegator) GetKIndex() uint64 { + if m != nil { + return m.KIndex + } + return 0 +} + +func (m *Delegator) GetInitialAmount() uint64 { + if m != nil { + return m.InitialAmount + } + return 0 +} + +// DelegationEntry represents an entry according to the F1-Fee-Distribution algorithm. 
+// Take a look at x/delegation/keeper/logic_f1distribution.go for more details +type DelegationEntry struct { + // staker on protocol level + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // k_index is the of the period this entry ends + KIndex uint64 `protobuf:"varint,2,opt,name=k_index,json=kIndex,proto3" json:"k_index,omitempty"` + // value is the quotient of collected rewards and total stake according to F1-distribution + Value github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,3,opt,name=value,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"value"` +} + +func (m *DelegationEntry) Reset() { *m = DelegationEntry{} } +func (m *DelegationEntry) String() string { return proto.CompactTextString(m) } +func (*DelegationEntry) ProtoMessage() {} +func (*DelegationEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{1} +} +func (m *DelegationEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DelegationEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DelegationEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DelegationEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegationEntry.Merge(m, src) +} +func (m *DelegationEntry) XXX_Size() int { + return m.Size() +} +func (m *DelegationEntry) XXX_DiscardUnknown() { + xxx_messageInfo_DelegationEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_DelegationEntry proto.InternalMessageInfo + +func (m *DelegationEntry) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *DelegationEntry) GetKIndex() uint64 { + if m != nil { + return m.KIndex + } + return 0 +} + +// DelegationPoolData stores general delegation information for every staker +type DelegationData struct { + // Every staker has one DelegationData + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // current_rewards ... + CurrentRewards uint64 `protobuf:"varint,2,opt,name=current_rewards,json=currentRewards,proto3" json:"current_rewards,omitempty"` + // total_delegation ... + TotalDelegation uint64 `protobuf:"varint,3,opt,name=total_delegation,json=totalDelegation,proto3" json:"total_delegation,omitempty"` + // latest_index_k ... 
+ LatestIndexK uint64 `protobuf:"varint,4,opt,name=latest_index_k,json=latestIndexK,proto3" json:"latest_index_k,omitempty"` + // delegator_count the amount of different addresses delegating to the staker + DelegatorCount uint64 `protobuf:"varint,5,opt,name=delegator_count,json=delegatorCount,proto3" json:"delegator_count,omitempty"` + // latest_index_was_undelegation helps indicates when an entry can be deleted + LatestIndexWasUndelegation bool `protobuf:"varint,6,opt,name=latest_index_was_undelegation,json=latestIndexWasUndelegation,proto3" json:"latest_index_was_undelegation,omitempty"` +} + +func (m *DelegationData) Reset() { *m = DelegationData{} } +func (m *DelegationData) String() string { return proto.CompactTextString(m) } +func (*DelegationData) ProtoMessage() {} +func (*DelegationData) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{2} +} +func (m *DelegationData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DelegationData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DelegationData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DelegationData) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegationData.Merge(m, src) +} +func (m *DelegationData) XXX_Size() int { + return m.Size() +} +func (m *DelegationData) XXX_DiscardUnknown() { + xxx_messageInfo_DelegationData.DiscardUnknown(m) +} + +var xxx_messageInfo_DelegationData proto.InternalMessageInfo + +func (m *DelegationData) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *DelegationData) GetCurrentRewards() uint64 { + if m != nil { + return m.CurrentRewards + } + return 0 +} + +func (m *DelegationData) GetTotalDelegation() uint64 { + if m != nil { + return m.TotalDelegation + } + return 0 +} + +func (m *DelegationData) GetLatestIndexK() uint64 { + if m != nil { + return m.LatestIndexK + } + return 0 +} + +func (m *DelegationData) GetDelegatorCount() uint64 { + if m != nil { + return m.DelegatorCount + } + return 0 +} + +func (m *DelegationData) GetLatestIndexWasUndelegation() bool { + if m != nil { + return m.LatestIndexWasUndelegation + } + return false +} + +// DelegationSlash represents an f1-slash +// these entries needs to be iterated to obtain the current amount of the actual stake +// Every staker can have n slash-entries +type DelegationSlash struct { + // staker who got slashed + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // k_index for f1-algorithm + KIndex uint64 `protobuf:"varint,2,opt,name=k_index,json=kIndex,proto3" json:"k_index,omitempty"` + // fraction that got slashed + Fraction github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,3,opt,name=fraction,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"fraction"` +} + +func (m *DelegationSlash) Reset() { *m = DelegationSlash{} } +func (m *DelegationSlash) String() string { return proto.CompactTextString(m) } +func (*DelegationSlash) ProtoMessage() {} +func (*DelegationSlash) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{3} +} +func (m *DelegationSlash) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DelegationSlash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DelegationSlash.Marshal(b, m, deterministic) + } else { + 
b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DelegationSlash) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegationSlash.Merge(m, src) +} +func (m *DelegationSlash) XXX_Size() int { + return m.Size() +} +func (m *DelegationSlash) XXX_DiscardUnknown() { + xxx_messageInfo_DelegationSlash.DiscardUnknown(m) +} + +var xxx_messageInfo_DelegationSlash proto.InternalMessageInfo + +func (m *DelegationSlash) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *DelegationSlash) GetKIndex() uint64 { + if m != nil { + return m.KIndex + } + return 0 +} + +// UndelegationQueueEntry ... +type UndelegationQueueEntry struct { + // index ... + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // delegator ... + Delegator string `protobuf:"bytes,3,opt,name=delegator,proto3" json:"delegator,omitempty"` + // amount ... + Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` + // creation_time ... + CreationTime uint64 `protobuf:"varint,5,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` +} + +func (m *UndelegationQueueEntry) Reset() { *m = UndelegationQueueEntry{} } +func (m *UndelegationQueueEntry) String() string { return proto.CompactTextString(m) } +func (*UndelegationQueueEntry) ProtoMessage() {} +func (*UndelegationQueueEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{4} +} +func (m *UndelegationQueueEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UndelegationQueueEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UndelegationQueueEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UndelegationQueueEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_UndelegationQueueEntry.Merge(m, src) +} +func (m *UndelegationQueueEntry) XXX_Size() int { + return m.Size() +} +func (m *UndelegationQueueEntry) XXX_DiscardUnknown() { + xxx_messageInfo_UndelegationQueueEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_UndelegationQueueEntry proto.InternalMessageInfo + +func (m *UndelegationQueueEntry) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *UndelegationQueueEntry) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *UndelegationQueueEntry) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +func (m *UndelegationQueueEntry) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *UndelegationQueueEntry) GetCreationTime() uint64 { + if m != nil { + return m.CreationTime + } + return 0 +} + +// QueueState ... +type QueueState struct { + // low_index ... + LowIndex uint64 `protobuf:"varint,1,opt,name=low_index,json=lowIndex,proto3" json:"low_index,omitempty"` + // high_index ... 
+ HighIndex uint64 `protobuf:"varint,2,opt,name=high_index,json=highIndex,proto3" json:"high_index,omitempty"` +} + +func (m *QueueState) Reset() { *m = QueueState{} } +func (m *QueueState) String() string { return proto.CompactTextString(m) } +func (*QueueState) ProtoMessage() {} +func (*QueueState) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{5} +} +func (m *QueueState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueueState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueueState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueueState) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueState.Merge(m, src) +} +func (m *QueueState) XXX_Size() int { + return m.Size() +} +func (m *QueueState) XXX_DiscardUnknown() { + xxx_messageInfo_QueueState.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueState proto.InternalMessageInfo + +func (m *QueueState) GetLowIndex() uint64 { + if m != nil { + return m.LowIndex + } + return 0 +} + +func (m *QueueState) GetHighIndex() uint64 { + if m != nil { + return m.HighIndex + } + return 0 +} + +// RedelegationCooldown ... +type RedelegationCooldown struct { + // low_index ... + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // high_index ... + CreationDate uint64 `protobuf:"varint,2,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` +} + +func (m *RedelegationCooldown) Reset() { *m = RedelegationCooldown{} } +func (m *RedelegationCooldown) String() string { return proto.CompactTextString(m) } +func (*RedelegationCooldown) ProtoMessage() {} +func (*RedelegationCooldown) Descriptor() ([]byte, []int) { + return fileDescriptor_e07f10cb3da486ac, []int{6} +} +func (m *RedelegationCooldown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedelegationCooldown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RedelegationCooldown.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RedelegationCooldown) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedelegationCooldown.Merge(m, src) +} +func (m *RedelegationCooldown) XXX_Size() int { + return m.Size() +} +func (m *RedelegationCooldown) XXX_DiscardUnknown() { + xxx_messageInfo_RedelegationCooldown.DiscardUnknown(m) +} + +var xxx_messageInfo_RedelegationCooldown proto.InternalMessageInfo + +func (m *RedelegationCooldown) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *RedelegationCooldown) GetCreationDate() uint64 { + if m != nil { + return m.CreationDate + } + return 0 +} + +func init() { + proto.RegisterEnum("kyve.delegation.v1beta1.SlashType", SlashType_name, SlashType_value) + proto.RegisterType((*Delegator)(nil), "kyve.delegation.v1beta1.Delegator") + proto.RegisterType((*DelegationEntry)(nil), "kyve.delegation.v1beta1.DelegationEntry") + proto.RegisterType((*DelegationData)(nil), "kyve.delegation.v1beta1.DelegationData") + proto.RegisterType((*DelegationSlash)(nil), "kyve.delegation.v1beta1.DelegationSlash") + proto.RegisterType((*UndelegationQueueEntry)(nil), "kyve.delegation.v1beta1.UndelegationQueueEntry") + proto.RegisterType((*QueueState)(nil), 
"kyve.delegation.v1beta1.QueueState") + proto.RegisterType((*RedelegationCooldown)(nil), "kyve.delegation.v1beta1.RedelegationCooldown") +} + +func init() { + proto.RegisterFile("kyve/delegation/v1beta1/delegation.proto", fileDescriptor_e07f10cb3da486ac) +} + +var fileDescriptor_e07f10cb3da486ac = []byte{ + // 659 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcf, 0x4e, 0xdb, 0x4e, + 0x10, 0x8e, 0x03, 0x04, 0x3c, 0x82, 0x24, 0xbf, 0xfd, 0xd1, 0x10, 0xa5, 0xc5, 0x20, 0xf7, 0x5f, + 0x5a, 0xa9, 0xb1, 0x50, 0x9f, 0x20, 0x60, 0x57, 0xa4, 0x50, 0xa0, 0xf9, 0x43, 0x45, 0x2f, 0xd6, + 0x62, 0x6f, 0x13, 0xcb, 0x8e, 0x97, 0xda, 0x6b, 0x42, 0x8e, 0xbd, 0x71, 0x42, 0x7d, 0x85, 0xaa, + 0x2f, 0xc3, 0x91, 0x63, 0xd5, 0x03, 0xaa, 0xe0, 0x45, 0xaa, 0xac, 0x97, 0x64, 0x53, 0x89, 0x03, + 0xa7, 0x64, 0xbe, 0x19, 0xcf, 0xf7, 0xcd, 0x37, 0xa3, 0x85, 0xaa, 0x3f, 0x3c, 0x25, 0x86, 0x4b, + 0x02, 0xd2, 0xc5, 0xcc, 0xa3, 0xa1, 0x71, 0xba, 0x71, 0x4c, 0x18, 0xde, 0x90, 0xa0, 0xda, 0x49, + 0x44, 0x19, 0x45, 0x2b, 0xa3, 0xca, 0x9a, 0x04, 0x8b, 0xca, 0xca, 0x72, 0x97, 0x76, 0x29, 0xaf, + 0x31, 0x46, 0xff, 0xd2, 0x72, 0xfd, 0x9b, 0x02, 0xaa, 0x99, 0x16, 0xd3, 0x08, 0x95, 0x20, 0x17, + 0x33, 0xec, 0x93, 0xa8, 0xac, 0xac, 0x2b, 0x55, 0xb5, 0x29, 0x22, 0xf4, 0x04, 0x54, 0xf7, 0xae, + 0xa8, 0x9c, 0xe5, 0xa9, 0x09, 0x80, 0x56, 0x60, 0xde, 0xb7, 0xbd, 0xd0, 0x25, 0x67, 0xe5, 0x99, + 0x75, 0xa5, 0x3a, 0xdb, 0xcc, 0xf9, 0x8d, 0x51, 0x84, 0x9e, 0x43, 0xde, 0x0b, 0x3d, 0xe6, 0xe1, + 0xc0, 0xc6, 0x7d, 0x9a, 0x84, 0xac, 0x3c, 0xcb, 0xf3, 0x4b, 0x02, 0xad, 0x73, 0x50, 0x3f, 0x57, + 0xa0, 0x60, 0x8e, 0x05, 0x5b, 0x21, 0x8b, 0x86, 0xf7, 0x2a, 0x91, 0xb8, 0xb2, 0x53, 0x5c, 0x26, + 0xcc, 0x9d, 0xe2, 0x20, 0x21, 0x5c, 0x82, 0xba, 0x59, 0xbb, 0xbc, 0x5e, 0xcb, 0xfc, 0xbe, 0x5e, + 0x7b, 0xd1, 0xf5, 0x58, 0x2f, 0x39, 0xae, 0x39, 0xb4, 0x6f, 0x38, 0x34, 0xee, 0xd3, 0x58, 0xfc, + 0xbc, 0x89, 0x5d, 0xdf, 0x60, 0xc3, 0x13, 0x12, 0xd7, 0x4c, 0xe2, 0x34, 0xd3, 0x8f, 0xf5, 0x8b, + 0x2c, 0xe4, 0x27, 0x52, 0x4c, 0xcc, 0xf0, 0xbd, 0x4a, 0x5e, 0x42, 0xc1, 0x49, 0xa2, 0x88, 0x84, + 0xcc, 0x8e, 0xc8, 0x00, 0x47, 0x6e, 0x2c, 0x14, 0xe5, 0x05, 0xdc, 0x4c, 0x51, 0xf4, 0x0a, 0x8a, + 0x8c, 0x32, 0x1c, 0xd8, 0x93, 0xa5, 0x08, 0x9f, 0x0a, 0x1c, 0x9f, 0xf0, 0xa1, 0x67, 0x90, 0x0f, + 0x30, 0x23, 0x31, 0x4b, 0x47, 0xb4, 0x7d, 0x61, 0xd8, 0x62, 0x8a, 0xf2, 0x49, 0x77, 0x46, 0xcc, + 0x63, 0xf3, 0x6d, 0x87, 0xfb, 0x3a, 0x97, 0x32, 0x8f, 0xe1, 0xad, 0x11, 0x8a, 0xea, 0xb0, 0x3a, + 0xd5, 0x6e, 0x80, 0x63, 0x3b, 0x09, 0x25, 0x19, 0xb9, 0x75, 0xa5, 0xba, 0xd0, 0xac, 0x48, 0xdd, + 0x3f, 0xe1, 0xb8, 0x23, 0x55, 0xe8, 0x17, 0x53, 0xbb, 0x69, 0x05, 0x38, 0xee, 0x3d, 0x7c, 0x37, + 0xef, 0x61, 0xe1, 0x4b, 0x84, 0x9d, 0xf1, 0xe4, 0x0f, 0x5f, 0xcf, 0xf8, 0x7b, 0xfd, 0x87, 0x02, + 0x25, 0x59, 0xe1, 0xc7, 0x84, 0x24, 0x24, 0xbd, 0x99, 0x65, 0x98, 0x4b, 0xd9, 0x15, 0xce, 0x9e, + 0x06, 0x92, 0xda, 0xec, 0xfd, 0x37, 0x3d, 0xf3, 0xef, 0x4d, 0x97, 0x20, 0x37, 0x75, 0xb2, 0x22, + 0x42, 0x4f, 0x61, 0xc9, 0x89, 0x08, 0x67, 0xb6, 0x99, 0xd7, 0x27, 0xc2, 0xf9, 0xc5, 0x3b, 0xb0, + 0xed, 0xf5, 0x89, 0xbe, 0x0d, 0xc0, 0x65, 0xb5, 0x18, 0x66, 0x04, 0x3d, 0x06, 0x35, 0xa0, 0x03, + 0x5b, 0x96, 0xb6, 0x10, 0xd0, 0x41, 0x6a, 0xcd, 0x2a, 0x40, 0xcf, 0xeb, 0xf6, 0xa6, 0x6c, 0x53, + 0x47, 0x08, 0x4f, 0xeb, 0x1d, 0x58, 0x6e, 0x92, 0xc9, 0xb0, 0x5b, 0x94, 0x06, 0x2e, 0x1d, 0x84, + 0xa8, 0x0c, 0xf3, 0xd8, 0x75, 0x23, 0x12, 0xc7, 0x62, 0x07, 0x77, 0xe1, 0x94, 0x40, 0x17, 0x33, + 0x22, 0x7a, 0x8e, 0x05, 0x9a, 0x98, 0x91, 0xd7, 0x5f, 0x41, 
0xe5, 0xab, 0x6c, 0x0f, 0x4f, 0x08, + 0xaa, 0x40, 0xa9, 0xb5, 0x5b, 0x6f, 0x6d, 0xdb, 0xed, 0xa3, 0x03, 0xcb, 0xee, 0xec, 0xb5, 0x0e, + 0xac, 0xad, 0xc6, 0xbb, 0x86, 0x65, 0x16, 0x33, 0xa8, 0x04, 0x48, 0xca, 0xb5, 0x1b, 0x1f, 0xac, + 0xfd, 0x4e, 0xbb, 0xa8, 0xa0, 0xff, 0xa1, 0x20, 0xe1, 0x87, 0xfb, 0x6d, 0xab, 0x98, 0x45, 0x8f, + 0xe0, 0x3f, 0xb9, 0xd1, 0xc1, 0xee, 0x7e, 0xdd, 0x2c, 0xce, 0x54, 0x66, 0xcf, 0x7f, 0x6a, 0x99, + 0xcd, 0xc6, 0xe5, 0x8d, 0xa6, 0x5c, 0xdd, 0x68, 0xca, 0x9f, 0x1b, 0x4d, 0xf9, 0x7e, 0xab, 0x65, + 0xae, 0x6e, 0xb5, 0xcc, 0xaf, 0x5b, 0x2d, 0xf3, 0xd9, 0x90, 0x6e, 0x60, 0xe7, 0xe8, 0xd0, 0xda, + 0x23, 0x6c, 0x40, 0x23, 0xdf, 0x70, 0x7a, 0xd8, 0x0b, 0x8d, 0x33, 0xf9, 0xd5, 0xe3, 0x07, 0x71, + 0x9c, 0xe3, 0x4f, 0xd7, 0xdb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x40, 0x35, 0xc4, 0x38, 0x15, + 0x05, 0x00, 0x00, +} + +func (m *Delegator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Delegator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Delegator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialAmount != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.InitialAmount)) + i-- + dAtA[i] = 0x20 + } + if m.KIndex != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.KIndex)) + i-- + dAtA[i] = 0x18 + } + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0x12 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DelegationEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DelegationEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DelegationEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.KIndex != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.KIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DelegationData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DelegationData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DelegationData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LatestIndexWasUndelegation { + i-- + if m.LatestIndexWasUndelegation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.DelegatorCount != 0 { + i = 
encodeVarintDelegation(dAtA, i, uint64(m.DelegatorCount)) + i-- + dAtA[i] = 0x28 + } + if m.LatestIndexK != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.LatestIndexK)) + i-- + dAtA[i] = 0x20 + } + if m.TotalDelegation != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.TotalDelegation)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentRewards != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.CurrentRewards)) + i-- + dAtA[i] = 0x10 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DelegationSlash) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DelegationSlash) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DelegationSlash) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Fraction.Size() + i -= size + if _, err := m.Fraction.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.KIndex != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.KIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UndelegationQueueEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UndelegationQueueEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UndelegationQueueEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreationTime != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.CreationTime)) + i-- + dAtA[i] = 0x28 + } + if m.Amount != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x20 + } + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueueState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueueState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueueState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HighIndex != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.HighIndex)) + i-- + dAtA[i] = 0x10 + } + if m.LowIndex != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.LowIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m 
*RedelegationCooldown) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedelegationCooldown) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedelegationCooldown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreationDate != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.CreationDate)) + i-- + dAtA[i] = 0x10 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDelegation(dAtA []byte, offset int, v uint64) int { + offset -= sovDelegation(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Delegator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.KIndex != 0 { + n += 1 + sovDelegation(uint64(m.KIndex)) + } + if m.InitialAmount != 0 { + n += 1 + sovDelegation(uint64(m.InitialAmount)) + } + return n +} + +func (m *DelegationEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.KIndex != 0 { + n += 1 + sovDelegation(uint64(m.KIndex)) + } + l = m.Value.Size() + n += 1 + l + sovDelegation(uint64(l)) + return n +} + +func (m *DelegationData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.CurrentRewards != 0 { + n += 1 + sovDelegation(uint64(m.CurrentRewards)) + } + if m.TotalDelegation != 0 { + n += 1 + sovDelegation(uint64(m.TotalDelegation)) + } + if m.LatestIndexK != 0 { + n += 1 + sovDelegation(uint64(m.LatestIndexK)) + } + if m.DelegatorCount != 0 { + n += 1 + sovDelegation(uint64(m.DelegatorCount)) + } + if m.LatestIndexWasUndelegation { + n += 2 + } + return n +} + +func (m *DelegationSlash) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.KIndex != 0 { + n += 1 + sovDelegation(uint64(m.KIndex)) + } + l = m.Fraction.Size() + n += 1 + l + sovDelegation(uint64(l)) + return n +} + +func (m *UndelegationQueueEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovDelegation(uint64(m.Index)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovDelegation(uint64(m.Amount)) + } + if m.CreationTime != 0 { + n += 1 + sovDelegation(uint64(m.CreationTime)) + } + return n +} + +func (m *QueueState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LowIndex != 0 { + n += 1 + sovDelegation(uint64(m.LowIndex)) + } + if m.HighIndex != 0 { + n += 1 + sovDelegation(uint64(m.HighIndex)) + } + return n +} + +func (m *RedelegationCooldown) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + 
sovDelegation(uint64(l)) + } + if m.CreationDate != 0 { + n += 1 + sovDelegation(uint64(m.CreationDate)) + } + return n +} + +func sovDelegation(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDelegation(x uint64) (n int) { + return sovDelegation(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Delegator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Delegator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Delegator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KIndex", wireType) + } + m.KIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialAmount", wireType) + } + m.InitialAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DelegationEntry) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DelegationEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DelegationEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KIndex", wireType) + } + m.KIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DelegationData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DelegationData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DelegationData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRewards", wireType) + } + m.CurrentRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegation", wireType) + } + m.TotalDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestIndexK", wireType) + } + m.LatestIndexK = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LatestIndexK |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorCount", wireType) + } + m.DelegatorCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelegatorCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LatestIndexWasUndelegation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LatestIndexWasUndelegation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DelegationSlash) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DelegationSlash: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: DelegationSlash: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KIndex", wireType) + } + m.KIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fraction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Fraction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UndelegationQueueEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UndelegationQueueEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UndelegationQueueEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTime", wireType) + } + m.CreationTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueueState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueueState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueueState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LowIndex", wireType) + } + m.LowIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LowIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HighIndex", wireType) + } + m.HighIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HighIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedelegationCooldown) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedelegationCooldown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedelegationCooldown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationDate", wireType) + } + m.CreationDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationDate |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDelegation(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDelegation + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDelegation + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDelegation + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDelegation = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDelegation = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDelegation = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/errors.go b/x/delegation/types/errors.go new file mode 100644 index 00000000..08215b15 --- /dev/null +++ b/x/delegation/types/errors.go @@ -0,0 +1,14 @@ +package types + +import ( + sdkErrors "cosmossdk.io/errors" +) + +var ( + ErrNotADelegator = sdkErrors.Register(ModuleName, 1000, "not a delegator") + ErrNotEnoughDelegation = sdkErrors.Register(ModuleName, 1001, "undelegate-amount is larger than current delegation") + ErrRedelegationOnCooldown = sdkErrors.Register(ModuleName, 1002, "all redelegation slots are on cooldown") + ErrMultipleRedelegationInSameBlock = sdkErrors.Register(ModuleName, 1003, "only one redelegation per delegator per block") + ErrStakerDoesNotExist = sdkErrors.Register(ModuleName, 1004, "staker does not exist") + ErrRedelegationToInactiveStaker = sdkErrors.Register(ModuleName, 1005, "redelegation to inactive staker not allowed") +) diff --git a/x/delegation/types/events.pb.go b/x/delegation/types/events.pb.go new file mode 100644 index 00000000..0092d011 --- /dev/null +++ b/x/delegation/types/events.pb.go @@ -0,0 +1,1536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/delegation/v1beta1/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventDelegate is an event emitted when someone delegates to a protocol node. +// emitted_by: MsgDelegate +type EventDelegate struct { + // address is the account address of the delegator. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventDelegate) Reset() { *m = EventDelegate{} } +func (m *EventDelegate) String() string { return proto.CompactTextString(m) } +func (*EventDelegate) ProtoMessage() {} +func (*EventDelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_d01988a9108a2e89, []int{0} +} +func (m *EventDelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDelegate.Merge(m, src) +} +func (m *EventDelegate) XXX_Size() int { + return m.Size() +} +func (m *EventDelegate) XXX_DiscardUnknown() { + xxx_messageInfo_EventDelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDelegate proto.InternalMessageInfo + +func (m *EventDelegate) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventDelegate) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventDelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventUndelegate is an event emitted when someone undelegates from a protocol node. +// emitted_by: EndBlock +type EventUndelegate struct { + // address is the account address of the delegator. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventUndelegate) Reset() { *m = EventUndelegate{} } +func (m *EventUndelegate) String() string { return proto.CompactTextString(m) } +func (*EventUndelegate) ProtoMessage() {} +func (*EventUndelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_d01988a9108a2e89, []int{1} +} +func (m *EventUndelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventUndelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventUndelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventUndelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventUndelegate.Merge(m, src) +} +func (m *EventUndelegate) XXX_Size() int { + return m.Size() +} +func (m *EventUndelegate) XXX_DiscardUnknown() { + xxx_messageInfo_EventUndelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_EventUndelegate proto.InternalMessageInfo + +func (m *EventUndelegate) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventUndelegate) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventUndelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventRedelegate is an event emitted when someone redelegates from one protocol node to another. +// emitted_by: MsgRedelegate +type EventRedelegate struct { + // address is the account address of the delegator. 
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // from_staker ... + FromStaker string `protobuf:"bytes,2,opt,name=from_staker,json=fromStaker,proto3" json:"from_staker,omitempty"` + // to_staker is the account address of the new staker in the the pool + ToStaker string `protobuf:"bytes,3,opt,name=to_staker,json=toStaker,proto3" json:"to_staker,omitempty"` + // amount ... + Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventRedelegate) Reset() { *m = EventRedelegate{} } +func (m *EventRedelegate) String() string { return proto.CompactTextString(m) } +func (*EventRedelegate) ProtoMessage() {} +func (*EventRedelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_d01988a9108a2e89, []int{2} +} +func (m *EventRedelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventRedelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventRedelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventRedelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventRedelegate.Merge(m, src) +} +func (m *EventRedelegate) XXX_Size() int { + return m.Size() +} +func (m *EventRedelegate) XXX_DiscardUnknown() { + xxx_messageInfo_EventRedelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_EventRedelegate proto.InternalMessageInfo + +func (m *EventRedelegate) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventRedelegate) GetFromStaker() string { + if m != nil { + return m.FromStaker + } + return "" +} + +func (m *EventRedelegate) GetToStaker() string { + if m != nil { + return m.ToStaker + } + return "" +} + +func (m *EventRedelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventWithdrawRewards ... +// emitted_by: MsgRedelegate, MsgDelegate, MsgWithdrawRewards, EndBlock +type EventWithdrawRewards struct { + // address is the account address of the delegator. + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // staker is the account address of the protocol node the users withdraws from. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventWithdrawRewards) Reset() { *m = EventWithdrawRewards{} } +func (m *EventWithdrawRewards) String() string { return proto.CompactTextString(m) } +func (*EventWithdrawRewards) ProtoMessage() {} +func (*EventWithdrawRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_d01988a9108a2e89, []int{3} +} +func (m *EventWithdrawRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventWithdrawRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventWithdrawRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventWithdrawRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventWithdrawRewards.Merge(m, src) +} +func (m *EventWithdrawRewards) XXX_Size() int { + return m.Size() +} +func (m *EventWithdrawRewards) XXX_DiscardUnknown() { + xxx_messageInfo_EventWithdrawRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_EventWithdrawRewards proto.InternalMessageInfo + +func (m *EventWithdrawRewards) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventWithdrawRewards) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventWithdrawRewards) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventSlash is an event emitted when a protocol node is slashed. +// emitted_by: MsgSubmitBundleProposal, EndBlock +type EventSlash struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` + // slash_type + SlashType SlashType `protobuf:"varint,4,opt,name=slash_type,json=slashType,proto3,enum=kyve.delegation.v1beta1.SlashType" json:"slash_type,omitempty"` +} + +func (m *EventSlash) Reset() { *m = EventSlash{} } +func (m *EventSlash) String() string { return proto.CompactTextString(m) } +func (*EventSlash) ProtoMessage() {} +func (*EventSlash) Descriptor() ([]byte, []int) { + return fileDescriptor_d01988a9108a2e89, []int{4} +} +func (m *EventSlash) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSlash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventSlash.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventSlash) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSlash.Merge(m, src) +} +func (m *EventSlash) XXX_Size() int { + return m.Size() +} +func (m *EventSlash) XXX_DiscardUnknown() { + xxx_messageInfo_EventSlash.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSlash proto.InternalMessageInfo + +func (m *EventSlash) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventSlash) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventSlash) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *EventSlash) GetSlashType() SlashType { + if m != nil { + return m.SlashType + } + return SLASH_TYPE_UNSPECIFIED +} + +func init() { + proto.RegisterType((*EventDelegate)(nil), "kyve.delegation.v1beta1.EventDelegate") + proto.RegisterType((*EventUndelegate)(nil), "kyve.delegation.v1beta1.EventUndelegate") + proto.RegisterType((*EventRedelegate)(nil), "kyve.delegation.v1beta1.EventRedelegate") + proto.RegisterType((*EventWithdrawRewards)(nil), "kyve.delegation.v1beta1.EventWithdrawRewards") + proto.RegisterType((*EventSlash)(nil), "kyve.delegation.v1beta1.EventSlash") +} + +func init() { + proto.RegisterFile("kyve/delegation/v1beta1/events.proto", fileDescriptor_d01988a9108a2e89) +} + +var fileDescriptor_d01988a9108a2e89 = []byte{ + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x99, 0x0f, 0x02, 0x1f, 0xd7, 0xa8, 0x49, 0x63, 0xa4, 0xd1, 0xa4, 0x12, 0xe2, 0x82, + 0x55, 0x1b, 0xf4, 0x09, 0x34, 0xb2, 0x20, 0x26, 0x2e, 0x8a, 0x7f, 0x82, 0x2e, 0x70, 0x60, 0xae, + 0xb4, 0x01, 0x3a, 0xcd, 0xcc, 0x00, 0xb2, 0xf4, 0x0d, 0x5c, 0xfa, 0x48, 0x2e, 0x59, 0xba, 0x34, + 0xf0, 0x22, 0x66, 0x86, 0x12, 0xc1, 0x84, 0x44, 0x13, 0x76, 0x3d, 0x73, 0x4e, 0x7f, 0xe7, 0xde, + 0xe4, 0xc2, 0x71, 0x77, 0x3c, 0x44, 0x8f, 0x61, 0x0f, 0x3b, 0x54, 0x85, 0x3c, 0xf2, 0x86, 0x95, + 0x16, 0x2a, 0x5a, 0xf1, 0x70, 0x88, 0x91, 0x92, 0x6e, 0x2c, 0xb8, 0xe2, 0x56, 0x41, 0xa7, 0xdc, + 0xef, 0x94, 0x9b, 0xa4, 0x0e, 0xca, 0xeb, 0x7e, 0x5f, 0xca, 0x1a, 0x44, 0xa9, 0x01, 0xdb, 0x55, + 0x8d, 0xbc, 0x98, 0x1b, 0x68, 0xd9, 0x90, 0xa3, 0x8c, 0x09, 0x94, 0xd2, 0x26, 0x45, 0x52, 0xce, + 0xfb, 0x0b, 0x69, 0xed, 0x43, 0x56, 0x2a, 0xda, 0x45, 0x61, 0xff, 0x33, 0x46, 0xa2, 0xf4, 0x3b, + 0xed, 0xf3, 0x41, 0xa4, 0xec, 0x74, 0x91, 0x94, 0x33, 0x7e, 0xa2, 0x4a, 0x0f, 0xb0, 0x6b, 0xd0, + 0x37, 0x11, 0xdb, 0x3c, 0xfc, 0x85, 0x24, 0x74, 0x1f, 0x7f, 0x41, 0x3f, 0x82, 0xad, 0x27, 0xc1, + 0xfb, 0xcd, 0x95, 0x0a, 0xd0, 0x4f, 0xf5, 0x79, 0xcd, 0x21, 
0xe4, 0x15, 0x5f, 0xd8, 0x69, 0x63, + 0xff, 0x57, 0xbc, 0xfe, 0x73, 0x86, 0xcc, 0xca, 0x0c, 0x8f, 0xb0, 0x67, 0x46, 0xb8, 0x0b, 0x55, + 0xc0, 0x04, 0x1d, 0xf9, 0x38, 0xa2, 0x82, 0xc9, 0x0d, 0x6e, 0xf9, 0x46, 0x00, 0x4c, 0x45, 0xbd, + 0x47, 0x65, 0x60, 0x15, 0x20, 0x17, 0x73, 0xde, 0x6b, 0x86, 0xcc, 0x80, 0x33, 0x7e, 0x56, 0xcb, + 0x1a, 0xfb, 0x2b, 0xd7, 0x3a, 0x03, 0x90, 0x9a, 0xd8, 0x54, 0xe3, 0x18, 0xcd, 0x56, 0x3b, 0x27, + 0x25, 0x77, 0xcd, 0x35, 0xb9, 0xa6, 0xfc, 0x7a, 0x1c, 0xa3, 0x9f, 0x97, 0x8b, 0xcf, 0xf3, 0xda, + 0xfb, 0xd4, 0x21, 0x93, 0xa9, 0x43, 0x3e, 0xa7, 0x0e, 0x79, 0x9d, 0x39, 0xa9, 0xc9, 0xcc, 0x49, + 0x7d, 0xcc, 0x9c, 0xd4, 0xbd, 0xd7, 0x09, 0x55, 0x30, 0x68, 0xb9, 0x6d, 0xde, 0xf7, 0x2e, 0x1b, + 0xb7, 0xd5, 0x2b, 0x54, 0x23, 0x2e, 0xba, 0x5e, 0x3b, 0xa0, 0x61, 0xe4, 0x3d, 0x2f, 0x9f, 0xa5, + 0xae, 0x97, 0xad, 0xac, 0x39, 0xc5, 0xd3, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0xe5, 0x5e, + 0x91, 0xf5, 0x02, 0x00, 0x00, +} + +func (m *EventDelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventUndelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventUndelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventUndelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventRedelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventRedelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventRedelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x20 + } + if len(m.ToStaker) > 0 { + i -= len(m.ToStaker) + copy(dAtA[i:], m.ToStaker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.ToStaker))) + i-- + dAtA[i] = 
0x1a + } + if len(m.FromStaker) > 0 { + i -= len(m.FromStaker) + copy(dAtA[i:], m.FromStaker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.FromStaker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventWithdrawRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventWithdrawRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventWithdrawRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventSlash) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSlash) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSlash) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SlashType != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.SlashType)) + i-- + dAtA[i] = 0x20 + } + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventDelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventUndelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventRedelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.FromStaker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.ToStaker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n 
+} + +func (m *EventWithdrawRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventSlash) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + if m.SlashType != 0 { + n += 1 + sovEvents(uint64(m.SlashType)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventDelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventUndelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventUndelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventUndelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventRedelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventRedelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventRedelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromStaker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromStaker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToStaker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ToStaker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventWithdrawRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventWithdrawRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventWithdrawRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSlash) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSlash: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSlash: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SlashType", wireType) + } + m.SlashType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SlashType |= SlashType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/expected_keepers.go b/x/delegation/types/expected_keepers.go new file mode 100644 index 00000000..188e6791 --- /dev/null +++ b/x/delegation/types/expected_keepers.go @@ -0,0 +1,40 @@ +package types + +import ( + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetModuleAddress(moduleName string) sdk.AccAddress +} + +type DistrKeeper interface { + FundCommunityPool(ctx sdk.Context, amount sdk.Coins, sender sdk.AccAddress) error +} + +// BankKeeper defines the 
expected interface needed to retrieve account balances.
+type BankKeeper interface {
+	SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error
+	SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error
+	SendCoinsFromModuleToModule(ctx sdk.Context, senderModule, recipientModule string, amt sdk.Coins) error
+	SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error
+}
+
+type PoolKeeper interface {
+	AssertPoolExists(ctx sdk.Context, poolId uint64) error
+}
+
+type UpgradeKeeper interface {
+	ScheduleUpgrade(ctx sdk.Context, plan types.Plan) error
+}
+
+type StakersKeeper interface {
+	DoesStakerExist(ctx sdk.Context, staker string) bool
+	GetAllStakerAddressesOfPool(ctx sdk.Context, poolId uint64) (stakers []string)
+	GetValaccountsFromStaker(ctx sdk.Context, stakerAddress string) (val []*stakerstypes.Valaccount)
+	GetPoolCount(ctx sdk.Context, stakerAddress string) (poolCount uint64)
+	GetActiveStakers(ctx sdk.Context) []string
+}
diff --git a/x/delegation/types/genesis.go b/x/delegation/types/genesis.go
new file mode 100644
index 00000000..c0dc81dd
--- /dev/null
+++ b/x/delegation/types/genesis.go
@@ -0,0 +1,135 @@
+package types
+
+import (
+	"fmt"
+
+	"github.com/KYVENetwork/chain/util"
+)
+
+// DefaultGenesis returns the delegation module's default genesis state
+func DefaultGenesis() *GenesisState {
+	return &GenesisState{
+		Params:                     DefaultParams(),
+		DelegatorList:              []Delegator{},
+		DelegationEntryList:        []DelegationEntry{},
+		DelegationDataList:         []DelegationData{},
+		DelegationSlashList:        []DelegationSlash{},
+		UndelegationQueueEntryList: []UndelegationQueueEntry{},
+		QueueStateUndelegation:     QueueState{},
+		RedelegationCooldownList:   []RedelegationCooldown{},
+	}
+}
+
+// Validate performs basic genesis state validation returning an error upon any failure.
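As a quick sanity check, the zero-value genesis produced by DefaultGenesis should satisfy the validation routine whose implementation follows immediately below. A test sketch, not part of the patch, assuming the github.com/KYVENetwork/chain module path used elsewhere in this change:

package types_test

import (
	"testing"

	"github.com/KYVENetwork/chain/x/delegation/types"
)

// TestDefaultGenesisIsValid checks that the default genesis state passes the
// module's own Validate routine; all lists are empty and the default params
// are expected to be well-formed.
func TestDefaultGenesisIsValid(t *testing.T) {
	gs := types.DefaultGenesis()
	if err := gs.Validate(); err != nil {
		t.Fatalf("default genesis should be valid, got: %v", err)
	}
}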
+func (gs *GenesisState) Validate() error { + if err := gs.validateF1(); err != nil { + return err + } + + if err := gs.validateUnbondingQueue(); err != nil { + return err + } + + if err := gs.validateRedelegation(); err != nil { + return err + } + + return gs.Params.Validate() +} + +func (gs *GenesisState) validateF1() error { + // Check for duplicated index in Delegator + delegatorMap := make(map[string]struct{}) + delegatorKIndexMap := make(map[string]struct{}) + + for _, elem := range gs.DelegatorList { + index := string(DelegatorKey(elem.Staker, elem.Delegator)) + if _, ok := delegatorMap[index]; ok { + return fmt.Errorf("duplicated index for delegator %v", elem) + } + delegatorMap[index] = struct{}{} + + kIndex := string(util.GetByteKey(elem.Staker, elem.KIndex)) + if _, ok := delegatorKIndexMap[kIndex]; ok { + return fmt.Errorf("duplicated k-index for delegator %v", elem) + } + delegatorKIndexMap[kIndex] = struct{}{} + } + + // Check for duplicated index in DelegationEntries + delegationEntriesKIndexMap := make(map[string]struct{}) + for _, elem := range gs.DelegationEntryList { + + index := string(DelegationEntriesKey(elem.Staker, elem.KIndex)) + if _, ok := delegationEntriesKIndexMap[index]; ok { + return fmt.Errorf("duplicated k-index for delegation entry %v", elem) + } + delegationEntriesKIndexMap[index] = struct{}{} + } + + // Check for duplicated index in DelegationEntries + delegationDataMap := make(map[string]struct{}) + + for _, elem := range gs.DelegationDataList { + index := string(DelegationDataKey(elem.Staker)) + if _, ok := delegationDataMap[index]; ok { + return fmt.Errorf("duplicated index for delegation data %v", elem) + } + delegationDataMap[index] = struct{}{} + } + + // Check for duplicated index in SlashEntries + slashMap := make(map[string]struct{}) + + for _, elem := range gs.DelegationSlashList { + index := string(DelegationSlashEntriesKey(elem.Staker, elem.KIndex)) + if _, ok := slashMap[index]; ok { + return fmt.Errorf("duplicated k-index for delegation entry %v", elem) + } + //nolint:all + entryIndex := string(DelegationEntriesKey(elem.Staker, elem.KIndex)) + if _, ok := delegationEntriesKIndexMap[entryIndex]; !ok { + return fmt.Errorf("slash entry pointing to non-existent delegation index: %v", elem) + } + + slashMap[index] = struct{}{} + } + + return nil +} + +func (gs *GenesisState) validateUnbondingQueue() error { + // Check undelegation queue + unbondingMap := make(map[string]struct{}) + + for _, elem := range gs.UndelegationQueueEntryList { + index := string(UndelegationQueueKey(elem.Index)) + if _, ok := unbondingMap[index]; ok { + return fmt.Errorf("duplicated index for unbonding entry %v", elem) + } + if elem.Index > gs.QueueStateUndelegation.HighIndex { + return fmt.Errorf("unbonding entry index too high: %v", elem) + } + if elem.Index < gs.QueueStateUndelegation.LowIndex { + return fmt.Errorf("unbonding entry index too low: %v", elem) + } + + unbondingMap[index] = struct{}{} + } + return nil +} + +func (gs *GenesisState) validateRedelegation() error { + // Check undelegation queue + redelegationMap := make(map[string]struct{}) + + for _, elem := range gs.RedelegationCooldownList { + index := string(RedelegationCooldownKey(elem.Address, elem.CreationDate)) + if _, ok := redelegationMap[index]; ok { + return fmt.Errorf("duplicated index for redelegation entry %v", elem) + } + + redelegationMap[index] = struct{}{} + } + return nil +} diff --git a/x/delegation/types/genesis.pb.go b/x/delegation/types/genesis.pb.go new file mode 100644 index 
00000000..7951a355 --- /dev/null +++ b/x/delegation/types/genesis.pb.go @@ -0,0 +1,771 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/delegation/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the delegation module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // delegator_list ... + DelegatorList []Delegator `protobuf:"bytes,2,rep,name=delegator_list,json=delegatorList,proto3" json:"delegator_list"` + // delegation_entry_list ... + DelegationEntryList []DelegationEntry `protobuf:"bytes,3,rep,name=delegation_entry_list,json=delegationEntryList,proto3" json:"delegation_entry_list"` + // delegation_data_list ... + DelegationDataList []DelegationData `protobuf:"bytes,4,rep,name=delegation_data_list,json=delegationDataList,proto3" json:"delegation_data_list"` + // delegation_slash_list ... + DelegationSlashList []DelegationSlash `protobuf:"bytes,5,rep,name=delegation_slash_list,json=delegationSlashList,proto3" json:"delegation_slash_list"` + // undelegation_queue_entry_list ... + UndelegationQueueEntryList []UndelegationQueueEntry `protobuf:"bytes,6,rep,name=undelegation_queue_entry_list,json=undelegationQueueEntryList,proto3" json:"undelegation_queue_entry_list"` + // queue_state_undelegation ... + QueueStateUndelegation QueueState `protobuf:"bytes,7,opt,name=queue_state_undelegation,json=queueStateUndelegation,proto3" json:"queue_state_undelegation"` + // redelegation_cooldown_list ... 
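The generated Marshal, Size, and Unmarshal methods give GenesisState a lossless binary round-trip; proto field numbers 1 through 8 correspond to the struct fields listed here (the final redelegation_cooldown_list field and the rest of the generated code continue below). A round-trip test sketch, not part of the patch:

package types_test

import (
	"testing"

	"github.com/KYVENetwork/chain/x/delegation/types"
)

// TestGenesisStateRoundTrip encodes the default genesis state with the
// generated Marshal method and decodes it again with Unmarshal.
func TestGenesisStateRoundTrip(t *testing.T) {
	in := types.DefaultGenesis()

	bz, err := in.Marshal()
	if err != nil {
		t.Fatalf("marshal: %v", err)
	}

	var out types.GenesisState
	if err := out.Unmarshal(bz); err != nil {
		t.Fatalf("unmarshal: %v", err)
	}

	if out.Params != in.Params {
		t.Fatalf("params changed during round-trip: %+v != %+v", out.Params, in.Params)
	}
}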
+ RedelegationCooldownList []RedelegationCooldown `protobuf:"bytes,8,rep,name=redelegation_cooldown_list,json=redelegationCooldownList,proto3" json:"redelegation_cooldown_list"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_0bd28fed64b7905b, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetDelegatorList() []Delegator { + if m != nil { + return m.DelegatorList + } + return nil +} + +func (m *GenesisState) GetDelegationEntryList() []DelegationEntry { + if m != nil { + return m.DelegationEntryList + } + return nil +} + +func (m *GenesisState) GetDelegationDataList() []DelegationData { + if m != nil { + return m.DelegationDataList + } + return nil +} + +func (m *GenesisState) GetDelegationSlashList() []DelegationSlash { + if m != nil { + return m.DelegationSlashList + } + return nil +} + +func (m *GenesisState) GetUndelegationQueueEntryList() []UndelegationQueueEntry { + if m != nil { + return m.UndelegationQueueEntryList + } + return nil +} + +func (m *GenesisState) GetQueueStateUndelegation() QueueState { + if m != nil { + return m.QueueStateUndelegation + } + return QueueState{} +} + +func (m *GenesisState) GetRedelegationCooldownList() []RedelegationCooldown { + if m != nil { + return m.RedelegationCooldownList + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.delegation.v1beta1.GenesisState") +} + +func init() { + proto.RegisterFile("kyve/delegation/v1beta1/genesis.proto", fileDescriptor_0bd28fed64b7905b) +} + +var fileDescriptor_0bd28fed64b7905b = []byte{ + // 436 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0xef, 0xd2, 0x30, + 0x1c, 0xc6, 0x37, 0x7f, 0x38, 0x4d, 0x51, 0x0f, 0x13, 0x75, 0x59, 0xe2, 0x20, 0xa8, 0x71, 0x17, + 0xd7, 0x80, 0x67, 0x2f, 0x08, 0x31, 0x46, 0xe3, 0x1f, 0x88, 0x26, 0x7a, 0x59, 0xba, 0xad, 0x19, + 0x0b, 0x63, 0x85, 0xb5, 0xe3, 0xcf, 0xbb, 0xf0, 0xea, 0x3b, 0xe2, 0xc8, 0xd1, 0x93, 0x31, 0xf0, + 0x46, 0xcc, 0xda, 0x0a, 0x25, 0xb2, 0xfc, 0xb8, 0x2d, 0xdf, 0x3d, 0xcf, 0xf3, 0xe9, 0xd3, 0x7c, + 0x0b, 0x9e, 0x4d, 0xd6, 0x0b, 0x0c, 0x23, 0x9c, 0xe2, 0x18, 0xb1, 0x84, 0x64, 0x70, 0xd1, 0x09, + 0x30, 0x43, 0x1d, 0x18, 0xe3, 0x0c, 0xd3, 0x84, 0x7a, 0xb3, 0x9c, 0x30, 0x62, 0x3e, 0x2a, 0x65, + 0xde, 0x51, 0xe6, 0x49, 0x99, 0xdd, 0x88, 0x49, 0x4c, 0xb8, 0x06, 0x96, 0x5f, 0x42, 0x6e, 0xbb, + 0x55, 0xa9, 0x4a, 0x82, 0x50, 0x3e, 0xad, 0x52, 0xce, 0x50, 0x8e, 0xa6, 0x12, 0xdf, 0xfe, 0x69, + 0x80, 0x3b, 0x6f, 0xc4, 0x81, 0x46, 0x0c, 0x31, 0x6c, 0xbe, 0x02, 0x86, 0x10, 
0x58, 0x7a, 0x4b, + 0x77, 0xeb, 0xdd, 0xa6, 0x57, 0x71, 0x40, 0xef, 0x13, 0x97, 0xf5, 0x6a, 0x9b, 0xdf, 0x4d, 0x6d, + 0x28, 0x4d, 0xe6, 0x47, 0x70, 0x4f, 0x4a, 0x49, 0xee, 0xa7, 0x09, 0x65, 0xd6, 0x8d, 0xd6, 0x95, + 0x5b, 0xef, 0xb6, 0x2b, 0x63, 0xfa, 0xff, 0xe4, 0x32, 0xe9, 0xee, 0xc1, 0xff, 0x3e, 0xa1, 0xcc, + 0x0c, 0xc0, 0x83, 0xa3, 0xc9, 0xc7, 0x19, 0xcb, 0xd7, 0x22, 0xf7, 0x8a, 0xe7, 0xba, 0xd7, 0xe5, + 0x26, 0x24, 0x1b, 0x94, 0x26, 0x99, 0x7e, 0x3f, 0x3a, 0x1d, 0x73, 0x86, 0x0f, 0x1a, 0x0a, 0x23, + 0x42, 0x0c, 0x09, 0x44, 0x8d, 0x23, 0x9e, 0x5f, 0x80, 0xe8, 0x23, 0x86, 0x24, 0xc1, 0x8c, 0x4e, + 0xa6, 0x67, 0x4a, 0xd0, 0x14, 0xd1, 0xb1, 0x20, 0xdc, 0xbc, 0xb8, 0xc4, 0xa8, 0x34, 0xfd, 0x5f, + 0x82, 0x8f, 0x39, 0x63, 0x05, 0x1e, 0x17, 0x99, 0x42, 0x99, 0x17, 0xb8, 0xc0, 0xea, 0x85, 0x19, + 0x9c, 0x05, 0x2b, 0x59, 0x5f, 0x14, 0xf7, 0xe7, 0xd2, 0xac, 0xde, 0x9b, 0x5d, 0x9c, 0xfd, 0xcb, + 0xc9, 0x21, 0xb0, 0x04, 0x8c, 0x96, 0x1b, 0xe4, 0xab, 0x4a, 0xeb, 0x16, 0x5f, 0xa2, 0x27, 0x95, + 0x50, 0x1e, 0xc5, 0x37, 0x4f, 0x82, 0x1e, 0xce, 0x0f, 0x13, 0xf5, 0x40, 0xe6, 0x1c, 0xd8, 0x39, + 0x56, 0xea, 0x85, 0x84, 0xa4, 0x11, 0x59, 0x66, 0xa2, 0xdb, 0x6d, 0xde, 0xed, 0x45, 0x25, 0x66, + 0xa8, 0x58, 0x5f, 0x4b, 0xa7, 0x04, 0x5a, 0xf9, 0x99, 0x7f, 0x65, 0xaf, 0xde, 0xdb, 0xcd, 0xce, + 0xd1, 0xb7, 0x3b, 0x47, 0xff, 0xb3, 0x73, 0xf4, 0x1f, 0x7b, 0x47, 0xdb, 0xee, 0x1d, 0xed, 0xd7, + 0xde, 0xd1, 0xbe, 0xc3, 0x38, 0x61, 0xe3, 0x22, 0xf0, 0x42, 0x32, 0x85, 0xef, 0xbe, 0x7d, 0x1d, + 0x7c, 0xc0, 0x6c, 0x49, 0xf2, 0x09, 0x0c, 0xc7, 0x28, 0xc9, 0xe0, 0x4a, 0x7d, 0x75, 0x6c, 0x3d, + 0xc3, 0x34, 0x30, 0xf8, 0x6b, 0x7b, 0xf9, 0x37, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xa4, 0xfe, 0x14, + 0x15, 0x04, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RedelegationCooldownList) > 0 { + for iNdEx := len(m.RedelegationCooldownList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RedelegationCooldownList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.QueueStateUndelegation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.UndelegationQueueEntryList) > 0 { + for iNdEx := len(m.UndelegationQueueEntryList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.UndelegationQueueEntryList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.DelegationSlashList) > 0 { + for iNdEx := len(m.DelegationSlashList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DelegationSlashList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.DelegationDataList) > 0 { + for iNdEx := len(m.DelegationDataList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.DelegationDataList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.DelegationEntryList) > 0 { + for iNdEx := len(m.DelegationEntryList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DelegationEntryList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.DelegatorList) > 0 { + for iNdEx := len(m.DelegatorList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DelegatorList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.DelegatorList) > 0 { + for _, e := range m.DelegatorList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.DelegationEntryList) > 0 { + for _, e := range m.DelegationEntryList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.DelegationDataList) > 0 { + for _, e := range m.DelegationDataList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.DelegationSlashList) > 0 { + for _, e := range m.DelegationSlashList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.UndelegationQueueEntryList) > 0 { + for _, e := range m.UndelegationQueueEntryList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.QueueStateUndelegation.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.RedelegationCooldownList) > 0 { + for _, e := range m.RedelegationCooldownList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegatorList = append(m.DelegatorList, Delegator{}) + if err := m.DelegatorList[len(m.DelegatorList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationEntryList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegationEntryList = append(m.DelegationEntryList, DelegationEntry{}) + if err := m.DelegationEntryList[len(m.DelegationEntryList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationDataList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegationDataList = append(m.DelegationDataList, DelegationData{}) + if err := m.DelegationDataList[len(m.DelegationDataList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationSlashList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DelegationSlashList = append(m.DelegationSlashList, DelegationSlash{}) + if err := m.DelegationSlashList[len(m.DelegationSlashList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field UndelegationQueueEntryList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UndelegationQueueEntryList = append(m.UndelegationQueueEntryList, UndelegationQueueEntry{}) + if err := m.UndelegationQueueEntryList[len(m.UndelegationQueueEntryList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueStateUndelegation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.QueueStateUndelegation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedelegationCooldownList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedelegationCooldownList = append(m.RedelegationCooldownList, RedelegationCooldown{}) + if err := m.RedelegationCooldownList[len(m.RedelegationCooldownList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/keys.go b/x/delegation/types/keys.go new file mode 100644 index 00000000..cfc6295c --- /dev/null +++ b/x/delegation/types/keys.go @@ -0,0 +1,96 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +const ( + // ModuleName defines the module name + ModuleName = "delegation" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_delegation" +) + +var ParamsKey = []byte{0x00} + +var StakerIndexKeyPrefix = []byte{1} // StakerIndexPoolCountKeyPrefix = []byte{1} + +var ( + // DelegatorKeyPrefix is the prefix to retrieve all Delegator entries + DelegatorKeyPrefix = []byte{1, 0} + + // DelegatorKeyPrefixIndex2 is the prefix for a different key order for the DelegatorKeyPrefix + DelegatorKeyPrefixIndex2 = []byte{1, 1} + + // DelegationEntriesKeyPrefix is the prefix to retrieve all DelegationEntries + DelegationEntriesKeyPrefix = []byte{2} + + // DelegationDataKeyPrefix ... + DelegationDataKeyPrefix = []byte{3} + + // DelegationSlashEntriesKeyPrefix ... + DelegationSlashEntriesKeyPrefix = []byte{4} + + // QueueKey ... + QueueKey = []byte{5} + + // UndelegationQueueKeyPrefix ... + UndelegationQueueKeyPrefix = []byte{6, 0} + + // UndelegationQueueKeyPrefixIndex2 ... + UndelegationQueueKeyPrefixIndex2 = []byte{6, 1} + + // RedelegationCooldownPrefix ... 
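These prefixes and the key builders that follow (RedelegationCooldownPrefix and the remaining helpers continue below) compose store keys via util.GetByteKey. A hypothetical sketch of the underlying idea, assuming that numeric components are written big-endian so byte-wise ordering matches numeric ordering, which is what makes prefix and range scans over these keys useful; the real implementation lives in util/arrays.go and may differ:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// byteKey is an illustrative stand-in for util.GetByteKey(index, addr): the
// assumed layout is an 8-byte big-endian index followed by the raw address
// bytes, so iterating a prefix yields entries in ascending index order.
func byteKey(index uint64, addr string) []byte {
	key := make([]byte, 8, 8+len(addr))
	binary.BigEndian.PutUint64(key, index)
	return append(key, addr...)
}

func main() {
	low := byteKey(1, "kyve1delegator")
	high := byteKey(2, "kyve1delegator")
	fmt.Println(bytes.Compare(low, high) < 0) // true: numeric order is preserved
}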
+ RedelegationCooldownPrefix = []byte{7} +) + +// DelegatorKey returns the store Key to retrieve a Delegator from the index fields +func DelegatorKey(stakerAddress string, delegatorAddress string) []byte { + return util.GetByteKey(stakerAddress, delegatorAddress) +} + +// DelegatorKeyIndex2 returns the store Key to retrieve a Delegator from the index fields +func DelegatorKeyIndex2(delegatorAddress string, stakerAddress string) []byte { + return util.GetByteKey(delegatorAddress, stakerAddress) +} + +// DelegationEntriesKey returns the store Key to retrieve a DelegationEntries from the index fields +func DelegationEntriesKey(stakerAddress string, kIndex uint64) []byte { + return util.GetByteKey(stakerAddress, kIndex) +} + +// DelegationDataKey returns the store Key to retrieve a DelegationPoolData from the index fields +func DelegationDataKey(stakerAddress string) []byte { + return util.GetByteKey(stakerAddress) +} + +func UndelegationQueueKey(kIndex uint64) []byte { + return util.GetByteKey(kIndex) +} + +func UndelegationQueueKeyIndex2(stakerAddress string, kIndex uint64) []byte { + return util.GetByteKey(stakerAddress, kIndex) +} + +func RedelegationCooldownKey(delegator string, block uint64) []byte { + return util.GetByteKey(delegator, block) +} + +func DelegationSlashEntriesKey(stakerAddress string, kIndex uint64) []byte { + return util.GetByteKey(stakerAddress, kIndex) +} + +func StakerIndexKey(amount uint64, stakerAddress string) []byte { + return util.GetByteKey(amount, stakerAddress) +} + +//func StakerIndexByPoolCountKey(poolCount uint64, amount uint64, stakerAddress string) []byte { +// return util.GetByteKey(poolCount, amount, stakerAddress) +//} diff --git a/x/delegation/types/message_delegate.go b/x/delegation/types/message_delegate.go new file mode 100644 index 00000000..bfd1ae6b --- /dev/null +++ b/x/delegation/types/message_delegate.go @@ -0,0 +1,45 @@ +package types + +import ( + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgDelegate = "delegate" + +var _ sdk.Msg = &MsgDelegate{} + +func (msg *MsgDelegate) Route() string { + return RouterKey +} + +func (msg *MsgDelegate) Type() string { + return TypeMsgDelegate +} + +func (msg *MsgDelegate) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgDelegate) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgDelegate) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + _, err = sdk.AccAddressFromBech32(msg.Staker) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid staker address (%s)", err) + } + return nil +} diff --git a/x/delegation/types/message_redelegate.go b/x/delegation/types/message_redelegate.go new file mode 100644 index 00000000..7098440d --- /dev/null +++ b/x/delegation/types/message_redelegate.go @@ -0,0 +1,40 @@ +package types + +import ( + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgRedelegate = "redelegate" + +var _ sdk.Msg = &MsgRedelegate{} + +func (msg *MsgRedelegate) Route() string { + return RouterKey +} + +func (msg *MsgRedelegate) Type() 
string { + return TypeMsgRedelegate +} + +func (msg *MsgRedelegate) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgRedelegate) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgRedelegate) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/delegation/types/message_undelegate.go b/x/delegation/types/message_undelegate.go new file mode 100644 index 00000000..69fa4a32 --- /dev/null +++ b/x/delegation/types/message_undelegate.go @@ -0,0 +1,46 @@ +package types + +import ( + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgUndelegate = "undelegate" + +var _ sdk.Msg = &MsgUndelegate{} + +func (msg *MsgUndelegate) Route() string { + return RouterKey +} + +func (msg *MsgUndelegate) Type() string { + return TypeMsgUndelegate +} + +func (msg *MsgUndelegate) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgUndelegate) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgUndelegate) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + _, err = sdk.AccAddressFromBech32(msg.Staker) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid staker address (%s)", err) + } + + return nil +} diff --git a/x/delegation/types/message_withdraw_rewards.go b/x/delegation/types/message_withdraw_rewards.go new file mode 100644 index 00000000..d567758b --- /dev/null +++ b/x/delegation/types/message_withdraw_rewards.go @@ -0,0 +1,46 @@ +package types + +import ( + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgWithdrawRewards = "withdraw_rewards" + +var _ sdk.Msg = &MsgWithdrawRewards{} + +func (msg *MsgWithdrawRewards) Route() string { + return RouterKey +} + +func (msg *MsgWithdrawRewards) Type() string { + return TypeMsgWithdrawRewards +} + +func (msg *MsgWithdrawRewards) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgWithdrawRewards) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgWithdrawRewards) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + _, err = sdk.AccAddressFromBech32(msg.Staker) + if err != nil { + return sdkErrors.Wrapf(errorsTypes.ErrInvalidAddress, "invalid staker address (%s)", err) + } + + return nil +} diff --git a/x/delegation/types/msgs.go b/x/delegation/types/msgs.go new file mode 100644 index 00000000..e568d11e --- /dev/null +++ b/x/delegation/types/msgs.go @@ -0,0 +1,35 @@ +package types + +import ( + "encoding/json" + + "cosmossdk.io/errors" + + sdk 
"github.com/cosmos/cosmos-sdk/types" +) + +var _ sdk.Msg = &MsgUpdateParams{} + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (msg *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + params := DefaultParams() + if err := json.Unmarshal([]byte(msg.Payload), ¶ms); err != nil { + return err + } + + if err := params.Validate(); err != nil { + return err + } + + return nil +} diff --git a/x/delegation/types/params.go b/x/delegation/types/params.go new file mode 100644 index 00000000..6cd04f4b --- /dev/null +++ b/x/delegation/types/params.go @@ -0,0 +1,83 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +// DefaultUnbondingDelegationTime ... +var DefaultUnbondingDelegationTime = uint64(60 * 60 * 24 * 5) + +// DefaultRedelegationCooldown ... +var DefaultRedelegationCooldown = uint64(60 * 60 * 24 * 5) + +// DefaultRedelegationMaxAmount ... +var DefaultRedelegationMaxAmount = uint64(5) + +// DefaultVoteSlash ... +var DefaultVoteSlash = "0.1" + +// DefaultUploadSlash ... +var DefaultUploadSlash = "0.2" + +// DefaultTimeoutSlash ... +var DefaultTimeoutSlash = "0.02" + +// NewParams creates a new Params instance +func NewParams( + unbondingDelegationTime uint64, + redelegationCooldown uint64, + redelegationMaxAmount uint64, + voteSlash string, + uploadSlash string, + timeoutSlash string, +) Params { + return Params{ + UnbondingDelegationTime: unbondingDelegationTime, + RedelegationCooldown: redelegationCooldown, + RedelegationMaxAmount: redelegationMaxAmount, + VoteSlash: voteSlash, + UploadSlash: uploadSlash, + TimeoutSlash: timeoutSlash, + } +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams( + DefaultUnbondingDelegationTime, + DefaultRedelegationCooldown, + DefaultRedelegationMaxAmount, + DefaultVoteSlash, + DefaultUploadSlash, + DefaultTimeoutSlash, + ) +} + +// Validate validates the set of params +func (p Params) Validate() error { + if err := util.ValidateUint64(p.UnbondingDelegationTime); err != nil { + return err + } + + if err := util.ValidateUint64(p.RedelegationCooldown); err != nil { + return err + } + + if err := util.ValidateUint64(p.RedelegationMaxAmount); err != nil { + return err + } + + if err := util.ValidatePercentage(p.VoteSlash); err != nil { + return err + } + + if err := util.ValidatePercentage(p.UploadSlash); err != nil { + return err + } + + if err := util.ValidatePercentage(p.TimeoutSlash); err != nil { + return err + } + + return nil +} diff --git a/x/delegation/types/params.pb.go b/x/delegation/types/params.pb.go new file mode 100644 index 00000000..89fde3a9 --- /dev/null +++ b/x/delegation/types/params.pb.go @@ -0,0 +1,540 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/delegation/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the delegation module parameters. +type Params struct { + // unbonding_delegation_time ... + UnbondingDelegationTime uint64 `protobuf:"varint,1,opt,name=unbonding_delegation_time,json=unbondingDelegationTime,proto3" json:"unbonding_delegation_time,omitempty"` + // unbonding_delegation_time ... + RedelegationCooldown uint64 `protobuf:"varint,2,opt,name=redelegation_cooldown,json=redelegationCooldown,proto3" json:"redelegation_cooldown,omitempty"` + // unbonding_delegation_time ... + RedelegationMaxAmount uint64 `protobuf:"varint,3,opt,name=redelegation_max_amount,json=redelegationMaxAmount,proto3" json:"redelegation_max_amount,omitempty"` + // vote_slash ... + VoteSlash string `protobuf:"bytes,4,opt,name=vote_slash,json=voteSlash,proto3" json:"vote_slash,omitempty"` + // upload_slash ... + UploadSlash string `protobuf:"bytes,5,opt,name=upload_slash,json=uploadSlash,proto3" json:"upload_slash,omitempty"` + // timeout_slash ... + TimeoutSlash string `protobuf:"bytes,6,opt,name=timeout_slash,json=timeoutSlash,proto3" json:"timeout_slash,omitempty"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_17019e1d49c878a9, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetUnbondingDelegationTime() uint64 { + if m != nil { + return m.UnbondingDelegationTime + } + return 0 +} + +func (m *Params) GetRedelegationCooldown() uint64 { + if m != nil { + return m.RedelegationCooldown + } + return 0 +} + +func (m *Params) GetRedelegationMaxAmount() uint64 { + if m != nil { + return m.RedelegationMaxAmount + } + return 0 +} + +func (m *Params) GetVoteSlash() string { + if m != nil { + return m.VoteSlash + } + return "" +} + +func (m *Params) GetUploadSlash() string { + if m != nil { + return m.UploadSlash + } + return "" +} + +func (m *Params) GetTimeoutSlash() string { + if m != nil { + return m.TimeoutSlash + } + return "" +} + +func init() { + proto.RegisterType((*Params)(nil), "kyve.delegation.v1beta1.Params") +} + +func init() { + proto.RegisterFile("kyve/delegation/v1beta1/params.proto", fileDescriptor_17019e1d49c878a9) +} + +var fileDescriptor_17019e1d49c878a9 = []byte{ + // 307 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0xd1, 0xbb, 0x4e, 0xf3, 0x30, + 0x1c, 0x05, 0xf0, 0xba, 0x5f, 0xbf, 0x4a, 0x35, 0x65, 0xb1, 0x40, 0x0d, 0x03, 
0x56, 0xb9, 0x0c, + 0x9d, 0x62, 0x55, 0x95, 0x18, 0xd8, 0xb8, 0x0d, 0x08, 0x81, 0x50, 0x41, 0x48, 0xb0, 0x44, 0x4e, + 0x63, 0xb5, 0x56, 0x63, 0xff, 0xa3, 0xc4, 0xe9, 0xe5, 0x2d, 0x18, 0x79, 0x24, 0xc6, 0x8e, 0x8c, + 0x28, 0x79, 0x11, 0x14, 0x37, 0x6a, 0xd3, 0xf5, 0x9c, 0xdf, 0xb1, 0x64, 0xfd, 0xf1, 0xf9, 0x74, + 0x39, 0x13, 0x2c, 0x10, 0xa1, 0x18, 0x73, 0x23, 0x41, 0xb3, 0x59, 0xdf, 0x17, 0x86, 0xf7, 0x59, + 0xc4, 0x63, 0xae, 0x12, 0x37, 0x8a, 0xc1, 0x00, 0xe9, 0x14, 0xca, 0xdd, 0x2a, 0xb7, 0x54, 0xa7, + 0x5f, 0x75, 0xdc, 0x7c, 0xb6, 0x92, 0x5c, 0xe2, 0xa3, 0x54, 0xfb, 0xa0, 0x03, 0xa9, 0xc7, 0xde, + 0x96, 0x7a, 0x46, 0x2a, 0xe1, 0xa0, 0x2e, 0xea, 0x35, 0x86, 0x9d, 0x0d, 0xb8, 0xdd, 0xf4, 0xaf, + 0x52, 0x09, 0x32, 0xc0, 0x87, 0xb1, 0xa8, 0x6c, 0x46, 0x00, 0x61, 0x00, 0x73, 0xed, 0xd4, 0xed, + 0xee, 0xa0, 0x5a, 0xde, 0x94, 0x1d, 0xb9, 0xc0, 0x9d, 0x9d, 0x91, 0xe2, 0x0b, 0x8f, 0x2b, 0x48, + 0xb5, 0x71, 0xfe, 0xd9, 0xd9, 0xce, 0x9b, 0x8f, 0x7c, 0x71, 0x65, 0x4b, 0x72, 0x8c, 0xf1, 0x0c, + 0x8c, 0xf0, 0x92, 0x90, 0x27, 0x13, 0xa7, 0xd1, 0x45, 0xbd, 0xd6, 0xb0, 0x55, 0x24, 0x2f, 0x45, + 0x40, 0x4e, 0x70, 0x3b, 0x8d, 0x42, 0xe0, 0x41, 0x09, 0xfe, 0x5b, 0xb0, 0xb7, 0xce, 0xd6, 0xe4, + 0x0c, 0xef, 0x17, 0xbf, 0x82, 0xd4, 0x94, 0xa6, 0x69, 0x4d, 0xbb, 0x0c, 0x2d, 0xba, 0xbe, 0xff, + 0xce, 0x28, 0x5a, 0x65, 0x14, 0xfd, 0x66, 0x14, 0x7d, 0xe6, 0xb4, 0xb6, 0xca, 0x69, 0xed, 0x27, + 0xa7, 0xb5, 0x0f, 0x36, 0x96, 0x66, 0x92, 0xfa, 0xee, 0x08, 0x14, 0x7b, 0x78, 0x7f, 0xbb, 0x7b, + 0x12, 0x66, 0x0e, 0xf1, 0x94, 0x8d, 0x26, 0x5c, 0x6a, 0xb6, 0xa8, 0x5e, 0xc3, 0x2c, 0x23, 0x91, + 0xf8, 0x4d, 0x7b, 0x85, 0xc1, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x87, 0x17, 0x6d, 0x46, 0xad, + 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TimeoutSlash) > 0 { + i -= len(m.TimeoutSlash) + copy(dAtA[i:], m.TimeoutSlash) + i = encodeVarintParams(dAtA, i, uint64(len(m.TimeoutSlash))) + i-- + dAtA[i] = 0x32 + } + if len(m.UploadSlash) > 0 { + i -= len(m.UploadSlash) + copy(dAtA[i:], m.UploadSlash) + i = encodeVarintParams(dAtA, i, uint64(len(m.UploadSlash))) + i-- + dAtA[i] = 0x2a + } + if len(m.VoteSlash) > 0 { + i -= len(m.VoteSlash) + copy(dAtA[i:], m.VoteSlash) + i = encodeVarintParams(dAtA, i, uint64(len(m.VoteSlash))) + i-- + dAtA[i] = 0x22 + } + if m.RedelegationMaxAmount != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.RedelegationMaxAmount)) + i-- + dAtA[i] = 0x18 + } + if m.RedelegationCooldown != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.RedelegationCooldown)) + i-- + dAtA[i] = 0x10 + } + if m.UnbondingDelegationTime != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.UnbondingDelegationTime)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UnbondingDelegationTime != 0 { + n += 1 + sovParams(uint64(m.UnbondingDelegationTime)) + } + if 
m.RedelegationCooldown != 0 { + n += 1 + sovParams(uint64(m.RedelegationCooldown)) + } + if m.RedelegationMaxAmount != 0 { + n += 1 + sovParams(uint64(m.RedelegationMaxAmount)) + } + l = len(m.VoteSlash) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + l = len(m.UploadSlash) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + l = len(m.TimeoutSlash) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingDelegationTime", wireType) + } + m.UnbondingDelegationTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnbondingDelegationTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RedelegationCooldown", wireType) + } + m.RedelegationCooldown = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RedelegationCooldown |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RedelegationMaxAmount", wireType) + } + m.RedelegationMaxAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RedelegationMaxAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteSlash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteSlash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadSlash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UploadSlash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSlash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TimeoutSlash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/query.pb.go b/x/delegation/types/query.pb.go new file mode 100644 index 00000000..986184c6 --- /dev/null +++ b/x/delegation/types/query.pb.go @@ -0,0 +1,538 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kyve/delegation/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_9afd23b59df61182, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9afd23b59df61182, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "kyve.delegation.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "kyve.delegation.v1beta1.QueryParamsResponse") +} + +func init() { + proto.RegisterFile("kyve/delegation/v1beta1/query.proto", fileDescriptor_9afd23b59df61182) +} + +var fileDescriptor_9afd23b59df61182 = []byte{ + // 290 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xce, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0x49, 0xcd, 0x49, 0x4d, 0x4f, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, + 0x2d, 0x49, 0x34, 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x07, 0x29, 0xd2, 0x43, 0x28, 0xd2, 0x83, 0x2a, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, + 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, + 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0xc0, 0x9a, 0x8a, 0xa1, 0xb2, 0x2a, 0xb8, 0x6c, + 0x2c, 0x48, 0x2c, 0x4a, 0xcc, 0x85, 0xaa, 0x52, 0x12, 0xe1, 0x12, 0x0a, 0x04, 0xb9, 0x20, 0x00, + 0x2c, 0x18, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xa2, 0x14, 0xc2, 0x25, 0x8c, 0x22, 0x5a, 0x5c, + 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xcb, 0xc5, 0x06, 0xd1, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, + 0x6d, 0x24, 0xaf, 0x87, 0xc3, 0xc1, 0x7a, 0x10, 0x8d, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, + 0x41, 0x35, 0x19, 0x4d, 0x63, 0xe4, 0x62, 0x05, 0x1b, 0x2b, 0xd4, 0xc3, 0xc8, 0xc5, 0x06, 0x51, + 0x22, 0xa4, 0x8d, 0xd3, 0x0c, 0x4c, 0x77, 0x49, 0xe9, 0x10, 0xa7, 0x18, 0xe2, 0x5c, 0x25, 0xf5, + 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x29, 0x0a, 0xc9, 0xeb, 0xe3, 0x0f, 0x0a, 0x27, 0xcf, 0x13, 0x8f, + 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, + 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, + 0x4b, 0xce, 0xcf, 0xd5, 0xf7, 0x8e, 0x0c, 0x73, 0xf5, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0xd6, + 0x4f, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0xaf, 0x40, 0x36, 0xb3, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, + 0x0d, 0x1c, 0xac, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0x41, 0xf5, 0x4d, 0xf0, 0x01, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not 
otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.delegation.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/delegation/v1beta1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/query.pb.gw.go b/x/delegation/types/query.pb.gw.go new file mode 100644 index 00000000..2702a9cd --- /dev/null +++ b/x/delegation/types/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/delegation/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "delegation", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/x/delegation/types/tx.pb.go b/x/delegation/types/tx.pb.go new file mode 100644 index 00000000..a72c1c79 --- /dev/null +++ b/x/delegation/types/tx.pb.go @@ -0,0 +1,2288 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/delegation/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgDelegate ... +type MsgDelegate struct { + // creator ... 
+ Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgDelegate) Reset() { *m = MsgDelegate{} } +func (m *MsgDelegate) String() string { return proto.CompactTextString(m) } +func (*MsgDelegate) ProtoMessage() {} +func (*MsgDelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{0} +} +func (m *MsgDelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDelegate.Merge(m, src) +} +func (m *MsgDelegate) XXX_Size() int { + return m.Size() +} +func (m *MsgDelegate) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDelegate proto.InternalMessageInfo + +func (m *MsgDelegate) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgDelegate) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgDelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgDelegatePoolResponse defines the Msg/DelegatePool response type. +type MsgDelegateResponse struct { +} + +func (m *MsgDelegateResponse) Reset() { *m = MsgDelegateResponse{} } +func (m *MsgDelegateResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDelegateResponse) ProtoMessage() {} +func (*MsgDelegateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{1} +} +func (m *MsgDelegateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDelegateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDelegateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDelegateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDelegateResponse.Merge(m, src) +} +func (m *MsgDelegateResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDelegateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDelegateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDelegateResponse proto.InternalMessageInfo + +// MsgWithdrawPool defines a SDK message for withdrawing delegation rewards from a specific pool. +type MsgWithdrawRewards struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... 
+ Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *MsgWithdrawRewards) Reset() { *m = MsgWithdrawRewards{} } +func (m *MsgWithdrawRewards) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawRewards) ProtoMessage() {} +func (*MsgWithdrawRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{2} +} +func (m *MsgWithdrawRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawRewards.Merge(m, src) +} +func (m *MsgWithdrawRewards) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawRewards) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawRewards proto.InternalMessageInfo + +func (m *MsgWithdrawRewards) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgWithdrawRewards) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +// MsgWithdrawPoolResponse defines the Msg/WithdrawPool response type. +type MsgWithdrawRewardsResponse struct { +} + +func (m *MsgWithdrawRewardsResponse) Reset() { *m = MsgWithdrawRewardsResponse{} } +func (m *MsgWithdrawRewardsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawRewardsResponse) ProtoMessage() {} +func (*MsgWithdrawRewardsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{3} +} +func (m *MsgWithdrawRewardsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawRewardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawRewardsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawRewardsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawRewardsResponse.Merge(m, src) +} +func (m *MsgWithdrawRewardsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawRewardsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawRewardsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawRewardsResponse proto.InternalMessageInfo + +// MsgUndelegatePool defines a SDK message for undelegating from a specific pool. +type MsgUndelegate struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgUndelegate) Reset() { *m = MsgUndelegate{} } +func (m *MsgUndelegate) String() string { return proto.CompactTextString(m) } +func (*MsgUndelegate) ProtoMessage() {} +func (*MsgUndelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{4} +} +func (m *MsgUndelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUndelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUndelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUndelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUndelegate.Merge(m, src) +} +func (m *MsgUndelegate) XXX_Size() int { + return m.Size() +} +func (m *MsgUndelegate) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUndelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUndelegate proto.InternalMessageInfo + +func (m *MsgUndelegate) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgUndelegate) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *MsgUndelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgUndelegatePoolResponse defines the Msg/UndelegatePool response type. +type MsgUndelegateResponse struct { +} + +func (m *MsgUndelegateResponse) Reset() { *m = MsgUndelegateResponse{} } +func (m *MsgUndelegateResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUndelegateResponse) ProtoMessage() {} +func (*MsgUndelegateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{5} +} +func (m *MsgUndelegateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUndelegateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUndelegateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUndelegateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUndelegateResponse.Merge(m, src) +} +func (m *MsgUndelegateResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUndelegateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUndelegateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUndelegateResponse proto.InternalMessageInfo + +// MsgRedelegatePool defines a SDK message for redelegating from a +// staker in a pool to another staker in the same or another pool +type MsgRedelegate struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // staker ... + FromStaker string `protobuf:"bytes,2,opt,name=from_staker,json=fromStaker,proto3" json:"from_staker,omitempty"` + // staker ... + ToStaker string `protobuf:"bytes,3,opt,name=to_staker,json=toStaker,proto3" json:"to_staker,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgRedelegate) Reset() { *m = MsgRedelegate{} } +func (m *MsgRedelegate) String() string { return proto.CompactTextString(m) } +func (*MsgRedelegate) ProtoMessage() {} +func (*MsgRedelegate) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{6} +} +func (m *MsgRedelegate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedelegate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedelegate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedelegate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRedelegate.Merge(m, src) +} +func (m *MsgRedelegate) XXX_Size() int { + return m.Size() +} +func (m *MsgRedelegate) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedelegate.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedelegate proto.InternalMessageInfo + +func (m *MsgRedelegate) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgRedelegate) GetFromStaker() string { + if m != nil { + return m.FromStaker + } + return "" +} + +func (m *MsgRedelegate) GetToStaker() string { + if m != nil { + return m.ToStaker + } + return "" +} + +func (m *MsgRedelegate) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgUndelegatePoolResponse defines the Msg/UndelegatePool response type. +type MsgRedelegateResponse struct { +} + +func (m *MsgRedelegateResponse) Reset() { *m = MsgRedelegateResponse{} } +func (m *MsgRedelegateResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRedelegateResponse) ProtoMessage() {} +func (*MsgRedelegateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{7} +} +func (m *MsgRedelegateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRedelegateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRedelegateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRedelegateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRedelegateResponse.Merge(m, src) +} +func (m *MsgRedelegateResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRedelegateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRedelegateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRedelegateResponse proto.InternalMessageInfo + +// MsgUpdateParams defines a SDK message for updating the module parameters. +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // payload defines the x/delegation parameters to update. 
+ Payload string `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{8} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cfef676107453bda, []int{9} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgDelegate)(nil), "kyve.delegation.v1beta1.MsgDelegate") + proto.RegisterType((*MsgDelegateResponse)(nil), "kyve.delegation.v1beta1.MsgDelegateResponse") + proto.RegisterType((*MsgWithdrawRewards)(nil), "kyve.delegation.v1beta1.MsgWithdrawRewards") + proto.RegisterType((*MsgWithdrawRewardsResponse)(nil), "kyve.delegation.v1beta1.MsgWithdrawRewardsResponse") + proto.RegisterType((*MsgUndelegate)(nil), "kyve.delegation.v1beta1.MsgUndelegate") + proto.RegisterType((*MsgUndelegateResponse)(nil), "kyve.delegation.v1beta1.MsgUndelegateResponse") + proto.RegisterType((*MsgRedelegate)(nil), "kyve.delegation.v1beta1.MsgRedelegate") + proto.RegisterType((*MsgRedelegateResponse)(nil), "kyve.delegation.v1beta1.MsgRedelegateResponse") + proto.RegisterType((*MsgUpdateParams)(nil), "kyve.delegation.v1beta1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), 
"kyve.delegation.v1beta1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("kyve/delegation/v1beta1/tx.proto", fileDescriptor_cfef676107453bda) } + +var fileDescriptor_cfef676107453bda = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x5f, 0x6b, 0xd3, 0x50, + 0x18, 0xc6, 0x1b, 0x5b, 0xe6, 0xfa, 0x4e, 0x19, 0x44, 0x67, 0xb3, 0x28, 0xb1, 0x04, 0x91, 0x82, + 0x9a, 0x38, 0x07, 0xde, 0x3b, 0x54, 0x10, 0xa9, 0x48, 0x8a, 0x8e, 0x79, 0xe1, 0x38, 0xcd, 0x39, + 0xa6, 0xb1, 0x4b, 0x4e, 0x38, 0xe7, 0xed, 0xba, 0x5e, 0xf9, 0x15, 0xc4, 0xcf, 0xe2, 0x87, 0xf0, + 0x72, 0x78, 0xe5, 0xa5, 0xb4, 0x5f, 0x44, 0xf2, 0x3f, 0xed, 0x34, 0x74, 0xe0, 0xe5, 0x9b, 0xf3, + 0xcb, 0xfb, 0x7b, 0x0e, 0x3c, 0x1c, 0xe8, 0x8e, 0x67, 0xa7, 0xcc, 0xa6, 0xec, 0x84, 0x79, 0x04, + 0x7d, 0x1e, 0xda, 0xa7, 0x7b, 0x43, 0x86, 0x64, 0xcf, 0xc6, 0x33, 0x2b, 0x12, 0x1c, 0xb9, 0xda, + 0x89, 0x09, 0xab, 0x24, 0xac, 0x8c, 0xd0, 0x77, 0x5d, 0x2e, 0x03, 0x2e, 0x8f, 0x13, 0xcc, 0x4e, + 0x87, 0xf4, 0x1f, 0xf3, 0x10, 0xb6, 0xfa, 0xd2, 0x7b, 0x9e, 0xfe, 0xc3, 0x54, 0x0d, 0xae, 0xba, + 0x82, 0x11, 0xe4, 0x42, 0x53, 0xba, 0x4a, 0xaf, 0xed, 0xe4, 0xa3, 0x7a, 0x0b, 0x36, 0x24, 0x92, + 0x31, 0x13, 0xda, 0x95, 0xe4, 0x20, 0x9b, 0xe2, 0xef, 0x24, 0xe0, 0x93, 0x10, 0xb5, 0x66, 0x57, + 0xe9, 0xb5, 0x9c, 0x6c, 0x32, 0x77, 0xe0, 0x46, 0x65, 0xb1, 0xc3, 0x64, 0xc4, 0x43, 0xc9, 0xcc, + 0x97, 0xa0, 0xf6, 0xa5, 0x77, 0xe8, 0xe3, 0x88, 0x0a, 0x32, 0x75, 0xd8, 0x94, 0x08, 0x2a, 0x2f, + 0xaf, 0x35, 0xef, 0x80, 0x7e, 0x71, 0x4f, 0x61, 0x39, 0x82, 0xeb, 0x7d, 0xe9, 0xbd, 0x0b, 0xe9, + 0xff, 0xbf, 0x57, 0x07, 0x76, 0x96, 0x56, 0x17, 0xce, 0x2f, 0x89, 0xd3, 0x61, 0x6b, 0x38, 0xef, + 0xc2, 0xd6, 0x27, 0xc1, 0x83, 0xe3, 0x25, 0x31, 0xc4, 0x9f, 0x06, 0xa9, 0xfc, 0x36, 0xb4, 0x91, + 0xe7, 0xc7, 0xcd, 0xe4, 0x78, 0x13, 0xf9, 0x60, 0x35, 0x59, 0xeb, 0x2f, 0xc9, 0xca, 0x00, 0x45, + 0x32, 0x17, 0xb6, 0xe3, 0xc8, 0x11, 0x25, 0xc8, 0xde, 0x12, 0x41, 0x02, 0xa9, 0x3e, 0x85, 0x36, + 0x99, 0xe0, 0x88, 0x0b, 0x1f, 0x67, 0x69, 0xba, 0x03, 0xed, 0xe7, 0xf7, 0x47, 0x37, 0xb3, 0x6e, + 0x3c, 0xa3, 0x54, 0x30, 0x29, 0x07, 0x28, 0xfc, 0xd0, 0x73, 0x4a, 0x34, 0xbe, 0x53, 0x44, 0x66, + 0x27, 0x9c, 0xd0, 0x2c, 0x75, 0x3e, 0x9a, 0xbb, 0xd0, 0x59, 0x91, 0xe4, 0xfe, 0x27, 0xdf, 0x5a, + 0xd0, 0xec, 0x4b, 0x4f, 0xfd, 0x08, 0x9b, 0x45, 0xd1, 0xee, 0x59, 0xff, 0x28, 0xab, 0x55, 0x69, + 0x8d, 0xfe, 0x70, 0x1d, 0x2a, 0xf7, 0xa8, 0x12, 0xb6, 0x57, 0x8b, 0xf5, 0xa0, 0x6e, 0xc1, 0x0a, + 0xac, 0xef, 0x5f, 0x02, 0x2e, 0xa4, 0x14, 0xa0, 0xd2, 0xb3, 0xfb, 0x75, 0x2b, 0x4a, 0x4e, 0xb7, + 0xd6, 0xe3, 0xaa, 0x96, 0x4a, 0xb3, 0x6a, 0x2d, 0x25, 0x57, 0x6f, 0xb9, 0x58, 0x14, 0xf5, 0x33, + 0x5c, 0x5b, 0x6a, 0x49, 0xaf, 0x36, 0x65, 0x85, 0xd4, 0x1f, 0xaf, 0x4b, 0xe6, 0xae, 0x83, 0x57, + 0x3f, 0xe6, 0x86, 0x72, 0x3e, 0x37, 0x94, 0xdf, 0x73, 0x43, 0xf9, 0xba, 0x30, 0x1a, 0xe7, 0x0b, + 0xa3, 0xf1, 0x6b, 0x61, 0x34, 0x3e, 0xd8, 0x9e, 0x8f, 0xa3, 0xc9, 0xd0, 0x72, 0x79, 0x60, 0xbf, + 0x3e, 0x7a, 0xff, 0xe2, 0x0d, 0xc3, 0x29, 0x17, 0x63, 0xdb, 0x1d, 0x11, 0x3f, 0xb4, 0xcf, 0xaa, + 0x4f, 0x20, 0xce, 0x22, 0x26, 0x87, 0x1b, 0xc9, 0x53, 0xb6, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, + 0xe7, 0x31, 0xcc, 0x6f, 0x22, 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // Delegate ... + Delegate(ctx context.Context, in *MsgDelegate, opts ...grpc.CallOption) (*MsgDelegateResponse, error) + // Withdraw ... + WithdrawRewards(ctx context.Context, in *MsgWithdrawRewards, opts ...grpc.CallOption) (*MsgWithdrawRewardsResponse, error) + // Undelegate ... + Undelegate(ctx context.Context, in *MsgUndelegate, opts ...grpc.CallOption) (*MsgUndelegateResponse, error) + // Redelegate ... + Redelegate(ctx context.Context, in *MsgRedelegate, opts ...grpc.CallOption) (*MsgRedelegateResponse, error) + // UpdateParams defines a governance operation for updating the x/delegation module + // parameters. The authority is hard-coded to the x/gov module account. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) Delegate(ctx context.Context, in *MsgDelegate, opts ...grpc.CallOption) (*MsgDelegateResponse, error) { + out := new(MsgDelegateResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Msg/Delegate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) WithdrawRewards(ctx context.Context, in *MsgWithdrawRewards, opts ...grpc.CallOption) (*MsgWithdrawRewardsResponse, error) { + out := new(MsgWithdrawRewardsResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Msg/WithdrawRewards", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) Undelegate(ctx context.Context, in *MsgUndelegate, opts ...grpc.CallOption) (*MsgUndelegateResponse, error) { + out := new(MsgUndelegateResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Msg/Undelegate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) Redelegate(ctx context.Context, in *MsgRedelegate, opts ...grpc.CallOption) (*MsgRedelegateResponse, error) { + out := new(MsgRedelegateResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Msg/Redelegate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.delegation.v1beta1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // Delegate ... + Delegate(context.Context, *MsgDelegate) (*MsgDelegateResponse, error) + // Withdraw ... + WithdrawRewards(context.Context, *MsgWithdrawRewards) (*MsgWithdrawRewardsResponse, error) + // Undelegate ... + Undelegate(context.Context, *MsgUndelegate) (*MsgUndelegateResponse, error) + // Redelegate ... + Redelegate(context.Context, *MsgRedelegate) (*MsgRedelegateResponse, error) + // UpdateParams defines a governance operation for updating the x/delegation module + // parameters. The authority is hard-coded to the x/gov module account. 
+ UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) Delegate(ctx context.Context, req *MsgDelegate) (*MsgDelegateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delegate not implemented") +} +func (*UnimplementedMsgServer) WithdrawRewards(ctx context.Context, req *MsgWithdrawRewards) (*MsgWithdrawRewardsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WithdrawRewards not implemented") +} +func (*UnimplementedMsgServer) Undelegate(ctx context.Context, req *MsgUndelegate) (*MsgUndelegateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Undelegate not implemented") +} +func (*UnimplementedMsgServer) Redelegate(ctx context.Context, req *MsgRedelegate) (*MsgRedelegateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Redelegate not implemented") +} +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_Delegate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDelegate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Delegate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Msg/Delegate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Delegate(ctx, req.(*MsgDelegate)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_WithdrawRewards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgWithdrawRewards) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).WithdrawRewards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Msg/WithdrawRewards", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).WithdrawRewards(ctx, req.(*MsgWithdrawRewards)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_Undelegate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUndelegate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Undelegate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Msg/Undelegate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Undelegate(ctx, req.(*MsgUndelegate)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_Redelegate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRedelegate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(MsgServer).Redelegate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Msg/Redelegate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Redelegate(ctx, req.(*MsgRedelegate)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.delegation.v1beta1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.delegation.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Delegate", + Handler: _Msg_Delegate_Handler, + }, + { + MethodName: "WithdrawRewards", + Handler: _Msg_WithdrawRewards_Handler, + }, + { + MethodName: "Undelegate", + Handler: _Msg_Undelegate_Handler, + }, + { + MethodName: "Redelegate", + Handler: _Msg_Redelegate_Handler, + }, + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/delegation/v1beta1/tx.proto", +} + +func (m *MsgDelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDelegateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDelegateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDelegateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Staker) > 0 { + i 
-= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawRewardsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawRewardsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawRewardsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUndelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUndelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUndelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUndelegateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUndelegateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUndelegateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRedelegate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRedelegate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedelegate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x20 + } + if len(m.ToStaker) > 0 { + i -= len(m.ToStaker) + copy(dAtA[i:], m.ToStaker) + i = encodeVarintTx(dAtA, i, uint64(len(m.ToStaker))) + i-- + dAtA[i] = 0x1a + } + if len(m.FromStaker) > 0 { + i -= len(m.FromStaker) + copy(dAtA[i:], m.FromStaker) + i = encodeVarintTx(dAtA, i, uint64(len(m.FromStaker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRedelegateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRedelegateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRedelegateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTx(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgDelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgDelegateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgWithdrawRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgWithdrawRewardsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUndelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgUndelegateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRedelegate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.FromStaker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ToStaker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + 
n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgRedelegateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgDelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDelegateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDelegateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDelegateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawRewardsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawRewardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawRewardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUndelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUndelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUndelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUndelegateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUndelegateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUndelegateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedelegate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedelegate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedelegate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FromStaker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FromStaker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ToStaker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.ToStaker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRedelegateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRedelegateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRedelegateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/delegation/types/types.go b/x/delegation/types/types.go new file mode 100644 index 00000000..ab1254f4 --- /dev/null +++ b/x/delegation/types/types.go @@ -0,0 +1 @@ +package types diff --git a/x/global/abci.go b/x/global/abci.go new file mode 100644 index 00000000..a692f0b1 --- /dev/null +++ b/x/global/abci.go @@ 
-0,0 +1,49 @@ +package global + +import ( + "github.com/KYVENetwork/chain/util" + sdk "github.com/cosmos/cosmos-sdk/types" + + // Auth + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // Global + "github.com/KYVENetwork/chain/x/global/keeper" + // Upgrade + upgradeKeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" +) + +// EndBlocker handles the fee burning if it is configured +func EndBlocker(ctx sdk.Context, ak authKeeper.AccountKeeper, bk bankKeeper.Keeper, gk keeper.Keeper, uk upgradeKeeper.Keeper) { + // Since no fees are paid in the genesis block, skip. + // NOTE: This is Tendermint specific. + if ctx.BlockHeight() == 1 { + return + } + + burnRatio := gk.GetBurnRatio(ctx) + if burnRatio.IsZero() { + return + } + + // Obtain all collected fees. + feeCoinsInt := bk.GetAllBalances(ctx, ak.GetModuleAddress(authTypes.FeeCollectorName)) + feeCoins := sdk.NewDecCoinsFromCoins(feeCoinsInt...) + if feeCoins.IsZero() { + return + } + + // Sum burn ratio amount. + burnCoins := sdk.NewCoins() + for _, coin := range feeCoins { + amount := coin.Amount.Mul(burnRatio) + burnCoins = burnCoins.Add(sdk.NewCoin(coin.Denom, amount.TruncateInt())) + } + + err := bk.BurnCoins(ctx, authTypes.FeeCollectorName, burnCoins) + if err != nil { + util.PanicHalt(uk, ctx, err.Error()) + } +} diff --git a/x/global/abci_test.go b/x/global/abci_test.go new file mode 100644 index 00000000..0985b930 --- /dev/null +++ b/x/global/abci_test.go @@ -0,0 +1,154 @@ +package global_test + +import ( + "cosmossdk.io/math" + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Global + "github.com/KYVENetwork/chain/x/global" + "github.com/KYVENetwork/chain/x/global/types" +) + +/* + +TEST CASES - DeductFeeDecorator + +* BurnRatio = 0.0 +* BurnRatio = 2/3 - test truncate +* BurnRatio = 0.5 +* BurnRatio = 1.0 + +* TODO(@max): combine with refund + +*/ + +var _ = Describe("AbciEndBlocker", Ordered, func() { + s := i.NewCleanChain() + encodingConfig := BuildEncodingConfig() + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + + accountBalanceBefore := s.GetBalanceFromAddress(i.DUMMY[0]) + totalSupplyBefore := s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + + BeforeEach(func() { + s = i.NewCleanChain() + + mintParams := s.App().MintKeeper.GetParams(s.Ctx()) + mintParams.InflationMax = sdk.ZeroDec() + mintParams.InflationMin = sdk.ZeroDec() + s.App().MintKeeper.SetParams(s.Ctx(), mintParams) + + accountBalanceBefore = s.GetBalanceFromAddress(i.DUMMY[0]) + totalSupplyBefore = s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + dfd = global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("BurnRatio = 0.0", func() { + // ARRANGE + // default burn ratio is zero + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(err).Should(Not(HaveOccurred())) + + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + accountBalanceDifference := accountBalanceBefore - accountBalanceAfter + Expect(accountBalanceDifference).To(Equal(uint64(200_000))) + + totalSupplyAfter := s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + totalSupplyDifference := totalSupplyBefore - totalSupplyAfter + Expect(totalSupplyDifference).To(Equal(uint64(0))) + }) + + It("BurnRatio = 2/3 - test truncate", func() { + // ARRANGE + // set burn ratio to 0.3 + params := types.DefaultParams() + params.BurnRatio = sdk.OneDec().MulInt64(2).QuoInt64(3) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + // default burn ratio is zero + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(err).Should(Not(HaveOccurred())) + + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + accountBalanceDifference := accountBalanceBefore - accountBalanceAfter + Expect(accountBalanceDifference).To(Equal(uint64(200_000))) + + totalSupplyAfter := s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + totalSupplyDifference := totalSupplyBefore - totalSupplyAfter + // Expect ..666 not ..667 + Expect(totalSupplyDifference).To(Equal(uint64(133_333))) + }) + + It("BurnRatio = 0.5", func() { + // ARRANGE + // set burn ratio to 0.5 + params := types.DefaultParams() + params.BurnRatio = sdk.OneDec().QuoInt64(2) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + s.CommitAfterSeconds(1) + + // ASSERT + 
Expect(err).Should(Not(HaveOccurred())) + + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + accountBalanceDifference := accountBalanceBefore - accountBalanceAfter + Expect(accountBalanceDifference).To(Equal(uint64(200_000))) + + totalSupplyAfter := s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + totalSupplyDifference := totalSupplyBefore - totalSupplyAfter + Expect(totalSupplyDifference).To(Equal(uint64(100_000))) + }) + + It("BurnRatio = 1.0", func() { + // ARRANGE + // set burn ratio to 1.0 + params := types.DefaultParams() + params.BurnRatio = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + s.CommitAfterSeconds(1) + + // ASSERT + Expect(err).Should(Not(HaveOccurred())) + + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + accountBalanceDifference := accountBalanceBefore - accountBalanceAfter + Expect(accountBalanceDifference).To(Equal(uint64(200_000))) + + totalSupplyAfter := s.App().BankKeeper.GetSupply(s.Ctx(), types.Denom).Amount.Uint64() + totalSupplyDifference := totalSupplyBefore - totalSupplyAfter + Expect(totalSupplyDifference).To(Equal(uint64(200_000))) + }) +}) diff --git a/x/global/ante.go b/x/global/ante.go new file mode 100644 index 00000000..e7618d8d --- /dev/null +++ b/x/global/ante.go @@ -0,0 +1,149 @@ +package global + +import ( + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + // Auth + "github.com/cosmos/cosmos-sdk/x/auth/ante" + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // FeeGrant + feeGrantKeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + // Global + "github.com/KYVENetwork/chain/x/global/keeper" + // Gov + govKeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + legacyGovTypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + // Staking + stakingKeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" +) + +// DeductFeeDecorator + +// The DeductFeeDecorator is responsible for enforcing the +// consensus minimum gas price. +// Validators can still choose their own (higher) gas prices. +type DeductFeeDecorator struct { + accountKeeper authKeeper.AccountKeeper + bankKeeper bankKeeper.Keeper + feeGrantKeeper feeGrantKeeper.Keeper + globalKeeper keeper.Keeper + stakingKeeper stakingKeeper.Keeper +} + +func NewDeductFeeDecorator(ak authKeeper.AccountKeeper, bk bankKeeper.Keeper, fk feeGrantKeeper.Keeper, gk keeper.Keeper, sk stakingKeeper.Keeper) DeductFeeDecorator { + return DeductFeeDecorator{ + accountKeeper: ak, + bankKeeper: bk, + feeGrantKeeper: fk, + globalKeeper: gk, + stakingKeeper: sk, + } +} + +func (dfd DeductFeeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { + // NOTE: This is Tendermint specific. 
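+ // The module's own fee checker (BuildTxFeeChecker), which enforces the consensus minimum gas price, is only attached from block 2 onwards; no fees are expected in the genesis block, so the SDK's default fee handling (validator-local min gas prices) is used there.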
+ var tfc ante.TxFeeChecker + if ctx.BlockHeight() > 1 { + tfc = BuildTxFeeChecker(ctx, dfd.globalKeeper, dfd.stakingKeeper) + } + + internalDfd := ante.NewDeductFeeDecorator(dfd.accountKeeper, dfd.bankKeeper, dfd.feeGrantKeeper, tfc) + + return internalDfd.AnteHandle(ctx, tx, simulate, next) +} + +// GasAdjustmentDecorator + +// The GasAdjustmentDecorator allows adding additional gas consumption +// to certain message types, making transactions which should only be +// used rarely more expensive. +type GasAdjustmentDecorator struct { + globalKeeper keeper.Keeper +} + +func NewGasAdjustmentDecorator(gk keeper.Keeper) GasAdjustmentDecorator { + return GasAdjustmentDecorator{ + globalKeeper: gk, + } +} + +func (gad GasAdjustmentDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { + gasAdjustments := gad.globalKeeper.GetGasAdjustments(ctx) + + for _, msg := range tx.GetMsgs() { + for _, adjustment := range gasAdjustments { + if sdk.MsgTypeURL(msg) == adjustment.Type { + ctx.GasMeter().ConsumeGas(adjustment.Amount, adjustment.Type) + break + } + } + } + + return next(ctx, tx, simulate) +} + +// InitialDepositDecorator + +// The InitialDepositDecorator is responsible for checking +// if the submit-proposal message also provides the required +// minimum deposit. Otherwise, the message is rejected. +type InitialDepositDecorator struct { + globalKeeper keeper.Keeper + govKeeper govKeeper.Keeper +} + +func NewInitialDepositDecorator(globalKeeper keeper.Keeper, govKeeper govKeeper.Keeper) InitialDepositDecorator { + return InitialDepositDecorator{ + globalKeeper: globalKeeper, + govKeeper: govKeeper, + } +} + +func (idd InitialDepositDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { + // NOTE: This is Tendermint specific. 
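+ // Skip the deposit check during the genesis block (height <= 1), where no governance proposals are expected.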
+ if ctx.BlockHeight() <= 1 { + return next(ctx, tx, simulate) + } + + minInitialDepositRatio := idd.globalKeeper.GetMinInitialDepositRatio(ctx) + depositParams := idd.govKeeper.GetDepositParams(ctx) + + requiredDeposit := sdk.NewCoins() + for _, coin := range depositParams.MinDeposit { + amount := sdk.NewDecFromInt(coin.Amount).Mul(minInitialDepositRatio).TruncateInt() + requiredDeposit = requiredDeposit.Add(sdk.NewCoin(coin.Denom, amount)) + } + + for _, rawMsg := range tx.GetMsgs() { + initialDeposit := sdk.NewCoins() + throwError := false + + if sdk.MsgTypeURL(rawMsg) == sdk.MsgTypeURL(&legacyGovTypes.MsgSubmitProposal{}) { + // cosmos.gov.v1beta1.MsgSubmitProposal + if legacyMsg, ok := rawMsg.(*legacyGovTypes.MsgSubmitProposal); ok { + initialDeposit = legacyMsg.GetInitialDeposit() + throwError = !initialDeposit.IsAllGTE(requiredDeposit) + } + } else if sdk.MsgTypeURL(rawMsg) == sdk.MsgTypeURL(&govTypes.MsgSubmitProposal{}) { + // cosmos.gov.v1.MsgSubmitProposal + if msg, ok := rawMsg.(*govTypes.MsgSubmitProposal); ok { + initialDeposit = msg.GetInitialDeposit() + throwError = !initialDeposit.IsAllGTE(requiredDeposit) + } + } + + if throwError { + return ctx, errors.Wrapf( + errorsTypes.ErrLogic, "minimum deposit is too small - was (%s), need (%s)", + initialDeposit, requiredDeposit, + ) + } + } + + return next(ctx, tx, simulate) +} diff --git a/x/global/ante_test.go b/x/global/ante_test.go new file mode 100644 index 00000000..7a7247f6 --- /dev/null +++ b/x/global/ante_test.go @@ -0,0 +1,609 @@ +package global_test + +import ( + "cosmossdk.io/math" + i "github.com/KYVENetwork/chain/testutil/integration" + stakersTypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + bankTypes "github.com/cosmos/cosmos-sdk/x/bank/types" + stakingTypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Auth + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + + // Global + "github.com/KYVENetwork/chain/x/global" + "github.com/KYVENetwork/chain/x/global/types" + + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govLegacyTypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +/* + +TEST CASES - DeductFeeDecorator + +* Invalid transaction. +* consensusGasPrice = 0.0; validatorGasPrice = 0.0 - deliverTX +* consensusGasPrice = 0.0; validatorGasPrice = 0.0 - checkTX +* consensusGasPrice = 1.0; validatorGasPrice = 0.0 - deliverTX - not enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 0.0 - deliverTX - enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 0.0 - checkTx - not enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 0.0 - checkTx - enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 2.0 - deliverTX - not enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 2.0 - deliverTX - not enough fees for validator but enough for consensus. +* consensusGasPrice = 1.0; validatorGasPrice = 2.0 - checkTx - not enough fees +* consensusGasPrice = 1.0; validatorGasPrice = 2.0 - checkTx - not enough fees for validator but enough for consensus. 
+ +*/ + +var _ = Describe("DeductFeeDecorator", Ordered, func() { + s := i.NewCleanChain() + encodingConfig := BuildEncodingConfig() + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + + accountBalanceBefore := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceBefore := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + BeforeEach(func() { + s = i.NewCleanChain() + encodingConfig = BuildEncodingConfig() + denom = s.App().StakingKeeper.BondDenom(s.Ctx()) + dfd = global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid transaction.", func() { + // ARRANGE + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + + // ACT + _, err := dfd.AnteHandle(s.Ctx(), &InvalidTx{}, false, NextFn) + + // ASSERT + Expect(err).Should(HaveOccurred()) + }) + + It("consensusGasPrice = 0.0; validatorGasPrice = 0.0 - deliverTX", func() { + // ARRANGE + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(false), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 0.0; validatorGasPrice = 0.0 - checkTX", func() { + // ARRANGE + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(true), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 0.0 - deliverTX - not enough fees", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(false), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(HaveOccurred()) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 0.0 - deliverTX - enough fees", func() { + // ARRANGE + 
params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(false), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter - 200_000)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 0.0 - checkTx - not enough fees", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(true), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(HaveOccurred()) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 0.0 - checkTx - enough fees", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(s.Ctx().WithIsCheckTx(true), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter - 200_000)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 2.0 - deliverTX - not enough fees", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + ctx := s.Ctx().WithMinGasPrices(sdk.NewDecCoins(sdk.NewDecCoin(denom, sdk.NewInt(2)))) + s.SetCtx(ctx) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(ctx.WithIsCheckTx(false), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(HaveOccurred()) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 2.0 - deliverTX - not enough fees for validator but enough for consensus.", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + ctx := s.Ctx().WithMinGasPrices(sdk.NewDecCoins(sdk.NewDecCoin(denom, sdk.NewInt(2)))) + s.SetCtx(ctx) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(ctx.WithIsCheckTx(false), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := 
s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter - 200_000)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 2.0 - checkTx - not enough fees", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + ctx := s.Ctx().WithMinGasPrices(sdk.NewDecCoins(sdk.NewDecCoin(denom, sdk.NewInt(2)))) + s.SetCtx(ctx) + tx := BuildTestTx(math.ZeroInt(), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(ctx.WithIsCheckTx(true), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(HaveOccurred()) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) + + It("consensusGasPrice = 1.0; validatorGasPrice = 2.0 - checkTx - not enough fees for validator but enough for consensus.", func() { + // ARRANGE + params := types.DefaultParams() + params.MinGasPrice = sdk.OneDec() + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + ctx := s.Ctx().WithMinGasPrices(sdk.NewDecCoins(sdk.NewDecCoin(denom, sdk.NewInt(2)))) + s.SetCtx(ctx) + tx := BuildTestTx(math.NewInt(1), denom, i.DUMMY[0], encodingConfig) + + // ACT + _, err := dfd.AnteHandle(ctx.WithIsCheckTx(true), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.DUMMY[0]) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(err).Should(HaveOccurred()) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceBefore).To(Equal(collectorBalanceAfter)) + }) +}) + +/* + +TEST CASES - GasAdjustmentDecorator + +* Empty transaction. +* Transaction with a normal message. +* Transaction with an adjusted message. +* Transaction with multiple adjusted messages. +* Transaction with multiple normal and multiple adjusted messages. + +*/ + +var _ = Describe("GasAdjustmentDecorator", Ordered, func() { + s := i.NewCleanChain() + encodingConfig := BuildEncodingConfig() + + // NOTE: This will change as implementation changes. 
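+ // BaseCost is the gas already metered on this context without any adjustment being applied (mainly params store access); the exact value depends on the store layout and SDK gas costs.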
+ BaseCost := 32079 + + BeforeEach(func() { + s = i.NewCleanChain() + + params := types.DefaultParams() + params.GasAdjustments = []types.GasAdjustment{ + { + Type: "/cosmos.staking.v1beta1.MsgCreateValidator", + Amount: 2000, + }, + { + Type: "/kyve.stakers.v1beta1.MsgCreateStaker", + Amount: 1000, + }, + } + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Empty transaction.", func() { + // ARRANGE + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + tx := txBuilder.GetTx() + + gad := global.NewGasAdjustmentDecorator(s.App().GlobalKeeper) + + // ACT + _, err := gad.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + Expect(s.Ctx().GasMeter().GasConsumed()).To(BeEquivalentTo(BaseCost)) + }) + + It("Transaction with a normal message.", func() { + // ARRANGE + msg := bankTypes.MsgSend{} + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + gad := global.NewGasAdjustmentDecorator(s.App().GlobalKeeper) + + // ACT + _, err := gad.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + Expect(s.Ctx().GasMeter().GasConsumed()).To(BeEquivalentTo(BaseCost)) + }) + + It("Transaction with an adjusted message.", func() { + // ARRANGE + msg := stakingTypes.MsgCreateValidator{} + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + gad := global.NewGasAdjustmentDecorator(s.App().GlobalKeeper) + + // ACT + _, err := gad.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + Expect(s.Ctx().GasMeter().GasConsumed()).To(BeEquivalentTo(BaseCost + 2000)) + }) + + It("Transaction with multiple adjusted messages.", func() { + // ARRANGE + firstMsg := stakingTypes.MsgCreateValidator{} + secondMsg := stakersTypes.MsgCreateStaker{} + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + _ = txBuilder.SetMsgs(&firstMsg, &secondMsg) + tx := txBuilder.GetTx() + + gad := global.NewGasAdjustmentDecorator(s.App().GlobalKeeper) + + // ACT + _, err := gad.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + Expect(s.Ctx().GasMeter().GasConsumed()).To(BeEquivalentTo(BaseCost + 3000)) + }) + + It("Transaction with multiple normal and multiple adjusted messages.", func() { + // ARRANGE + firstMsg := stakersTypes.MsgJoinPool{} + secondMsg := stakersTypes.MsgCreateStaker{} + thirdMsg := stakingTypes.MsgCreateValidator{} + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + _ = txBuilder.SetMsgs(&firstMsg, &secondMsg, &thirdMsg) + tx := txBuilder.GetTx() + + gad := global.NewGasAdjustmentDecorator(s.App().GlobalKeeper) + + // ACT + _, err := gad.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + Expect(s.Ctx().GasMeter().GasConsumed()).To(BeEquivalentTo(BaseCost + 3000)) + }) +}) + +/* +TEST CASES - InitialDepositDecorator + +* No Deposit, no min-deposit - v1 +* No Deposit, no min-deposit - legacy +* Deposit, no min-deposit - v1 +* Deposit, no min-deposit - legacy +* No Deposit, min-deposit - v1 +* No Deposit, min-deposit - legacy +* Deposit, min-deposit - v1 +* Deposit, min-deposit - legacy +*/ +var _ = Describe("InitialDepositDecorator", Ordered, func() { + s := i.NewCleanChain() + encodingConfig := BuildEncodingConfig() + zeroCoins := sdk.NewCoins(sdk.NewCoin(types.Denom, math.ZeroInt())) + var emptyMsg []sdk.Msg + + BeforeEach(func() { + s = 
i.NewCleanChain() + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("No Deposit, no min-deposit - v1", func() { + // ARRANGE + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + submitMsg, govErr := govV1Types.NewMsgSubmitProposal(emptyMsg, zeroCoins, i.ALICE, "metadata") + Expect(govErr).ToNot(HaveOccurred()) + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) + + It("No Deposit, no min-deposit - legacy", func() { + // ARRANGE + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + + content, created := govLegacyTypes.ContentFromProposalType("Text-test", "Descirption", "Text") + Expect(created).To(BeTrue()) + + submitMsg, govErr := govLegacyTypes.NewMsgSubmitProposal(content, zeroCoins, sdk.MustAccAddressFromBech32(i.ALICE)) + Expect(govErr).ToNot(HaveOccurred()) + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) + + It("Deposit, no min-deposit - v1", func() { + // ARRANGE + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + hundredKyveCoins := sdk.NewCoins(sdk.NewCoin(types.Denom, math.NewInt(100_000_000_000))) + submitMsg, govErr := govV1Types.NewMsgSubmitProposal(emptyMsg, hundredKyveCoins, i.ALICE, "metadata") + Expect(govErr).ToNot(HaveOccurred()) + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) + + It("Deposit, no min-deposit - legacy", func() { + // ARRANGE + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + + content, created := govLegacyTypes.ContentFromProposalType("Text-test", "Descirption", "Text") + Expect(created).To(BeTrue()) + + hundredKyveCoins := sdk.NewCoins(sdk.NewCoin(types.Denom, math.NewInt(100_000_000_000))) + submitMsg, govErr := govLegacyTypes.NewMsgSubmitProposal(content, hundredKyveCoins, sdk.MustAccAddressFromBech32(i.ALICE)) + Expect(govErr).ToNot(HaveOccurred()) + + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) + + It("No Deposit, min-deposit - v1", func() { + // ARRANGE + params := types.DefaultParams() + params.MinInitialDepositRatio = sdk.NewDec(1).QuoInt64(4) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + submitMsg, govErr := govV1Types.NewMsgSubmitProposal(emptyMsg, zeroCoins, i.ALICE, "metadata") + Expect(govErr).ToNot(HaveOccurred()) + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("No Deposit, min-deposit - legacy", func() { + // ARRANGE + params := types.DefaultParams() + params.MinInitialDepositRatio = sdk.NewDec(1).QuoInt64(4) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + 
txBuilder := encodingConfig.TxConfig.NewTxBuilder() + + content, created := govLegacyTypes.ContentFromProposalType("Text-test", "Descirption", "Text") + Expect(created).To(BeTrue()) + + submitMsg, govErr := govLegacyTypes.NewMsgSubmitProposal(content, zeroCoins, sdk.MustAccAddressFromBech32(i.ALICE)) + Expect(govErr).ToNot(HaveOccurred()) + + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Deposit, min-deposit - v1", func() { + // ARRANGE + params := types.DefaultParams() + params.MinInitialDepositRatio = sdk.NewDec(1).QuoInt64(4) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + twentyFiveKyveCoins := sdk.NewCoins(sdk.NewCoin(types.Denom, math.NewInt(25_000_000_000))) + + submitMsg, govErr := govV1Types.NewMsgSubmitProposal(emptyMsg, twentyFiveKyveCoins, i.ALICE, "metadata") + Expect(govErr).ToNot(HaveOccurred()) + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) + + It("Deposit, min-deposit - legacy", func() { + // ARRANGE + params := types.DefaultParams() + params.MinInitialDepositRatio = sdk.NewDec(1).QuoInt64(4) + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + + content, created := govLegacyTypes.ContentFromProposalType("Text-test", "Descirption", "Text") + Expect(created).To(BeTrue()) + + twentyFiveKyveCoins := sdk.NewCoins(sdk.NewCoin(types.Denom, math.NewInt(25_000_000_000))) + submitMsg, govErr := govLegacyTypes.NewMsgSubmitProposal(content, twentyFiveKyveCoins, sdk.MustAccAddressFromBech32(i.ALICE)) + Expect(govErr).ToNot(HaveOccurred()) + + _ = txBuilder.SetMsgs(submitMsg) + tx := txBuilder.GetTx() + + gid := global.NewInitialDepositDecorator(s.App().GlobalKeeper, s.App().GovKeeper) + + // ACT + _, err := gid.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + Expect(err).ToNot(HaveOccurred()) + }) +}) diff --git a/x/global/ante_utils_test.go b/x/global/ante_utils_test.go new file mode 100644 index 00000000..3f562d1d --- /dev/null +++ b/x/global/ante_utils_test.go @@ -0,0 +1,78 @@ +package global_test + +import ( + "cosmossdk.io/math" + amino "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/simapp/params" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/tx" +) + +// BuildEncodingConfig ... +func BuildEncodingConfig() params.EncodingConfig { + cdc := amino.NewLegacyAmino() + interfaceRegistry := types.NewInterfaceRegistry() + codec := amino.NewProtoCodec(interfaceRegistry) + + encodingConfig := params.EncodingConfig{ + InterfaceRegistry: interfaceRegistry, + Codec: codec, + TxConfig: tx.NewTxConfig(codec, tx.DefaultSignModes), + Amino: cdc, + } + + return encodingConfig +} + +// BuildTestTx ... 
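+// It builds a FeeTx carrying a single TestMsg from feePayer, with a gas limit of 200_000 and a fee of gasPrice * gasLimit in the given denom.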
+func BuildTestTx(gasPrice math.Int, denom string, feePayer string, encodingConfig params.EncodingConfig) sdk.FeeTx { + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + + fees := sdk.NewCoins(sdk.NewCoin(denom, gasPrice.MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + + msg := &TestMsg{Signers: []string{feePayer}} + _ = txBuilder.SetMsgs(msg) + + return txBuilder.GetTx() +} + +// Invalid Transaction. +var _ sdk.Tx = &InvalidTx{} + +type InvalidTx struct{} + +func (InvalidTx) GetMsgs() []sdk.Msg { return []sdk.Msg{nil} } +func (InvalidTx) ValidateBasic() error { return nil } + +// NextFn ... +func NextFn(ctx sdk.Context, _ sdk.Tx, _ bool) (sdk.Context, error) { + return ctx, nil +} + +// Test Message. +var _ sdk.Msg = (*TestMsg)(nil) + +type TestMsg struct { + Signers []string +} + +func (msg *TestMsg) Reset() {} +func (msg *TestMsg) String() string { return "" } +func (msg *TestMsg) ProtoMessage() {} +func (msg *TestMsg) ValidateBasic() error { return nil } + +func (msg *TestMsg) GetSigners() []sdk.AccAddress { + var addrs []sdk.AccAddress + + for _, signer := range msg.Signers { + addr := sdk.MustAccAddressFromBech32(signer) + addrs = append(addrs, addr) + } + + return addrs +} diff --git a/x/global/client/cli/query.go b/x/global/client/cli/query.go new file mode 100644 index 00000000..9a00b86f --- /dev/null +++ b/x/global/client/cli/query.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/global/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/spf13/cobra" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd() *cobra.Command { + // Group x/global queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdQueryParams()) + + return cmd +} diff --git a/x/global/client/cli/query_params.go b/x/global/client/cli/query_params.go new file mode 100644 index 00000000..b3a1c211 --- /dev/null +++ b/x/global/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/KYVENetwork/chain/x/global/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/global/genesis.go b/x/global/genesis.go new file mode 100644 index 00000000..2004c11e --- /dev/null +++ b/x/global/genesis.go @@ -0,0 +1,18 @@ +package global + +import ( + "github.com/KYVENetwork/chain/x/global/keeper" + "github.com/KYVENetwork/chain/x/global/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the x/global module's state from a provided genesis state. 
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) +} + +// ExportGenesis returns the x/global module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + params := k.GetParams(ctx) + return types.NewGenesisState(params) +} diff --git a/x/global/keeper/getters_params.go b/x/global/keeper/getters_params.go new file mode 100644 index 00000000..5854757c --- /dev/null +++ b/x/global/keeper/getters_params.go @@ -0,0 +1,51 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/global/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams returns the current x/global module parameters. +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + store := ctx.KVStore(k.storeKey) + + bz := store.Get(types.ParamsKey) + if bz == nil { + return params + } + + k.cdc.MustUnmarshal(bz, ¶ms) + return params +} + +// GetMinGasPrice returns the MinGasPrice param. +func (k Keeper) GetMinGasPrice(ctx sdk.Context) (res sdk.Dec) { + return k.GetParams(ctx).MinGasPrice +} + +// GetBurnRatio returns the BurnRatio param. +func (k Keeper) GetBurnRatio(ctx sdk.Context) (res sdk.Dec) { + return k.GetParams(ctx).BurnRatio +} + +// GetGasAdjustments returns the GasAdjustments param. +func (k Keeper) GetGasAdjustments(ctx sdk.Context) (res []types.GasAdjustment) { + return k.GetParams(ctx).GasAdjustments +} + +// GetGasRefunds returns the GasRefunds param. +func (k Keeper) GetGasRefunds(ctx sdk.Context) (res []types.GasRefund) { + return k.GetParams(ctx).GasRefunds +} + +// GetMinInitialDepositRatio returns the MinInitialDepositRatio param. +func (k Keeper) GetMinInitialDepositRatio(ctx sdk.Context) (res sdk.Dec) { + return k.GetParams(ctx).MinInitialDepositRatio +} + +// SetParams sets the x/global module parameters. 
+func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(¶ms) + store.Set(types.ParamsKey, bz) +} diff --git a/x/global/keeper/grpc_query.go b/x/global/keeper/grpc_query.go new file mode 100644 index 00000000..791032d7 --- /dev/null +++ b/x/global/keeper/grpc_query.go @@ -0,0 +1,7 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/global/types" +) + +var _ types.QueryServer = Keeper{} diff --git a/x/global/keeper/grpc_query_params.go b/x/global/keeper/grpc_query_params.go new file mode 100644 index 00000000..1cfd964b --- /dev/null +++ b/x/global/keeper/grpc_query_params.go @@ -0,0 +1,19 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/global/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/global/keeper/keeper.go b/x/global/keeper/keeper.go new file mode 100644 index 00000000..31427bba --- /dev/null +++ b/x/global/keeper/keeper.go @@ -0,0 +1,36 @@ +package keeper + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/global/types" + "github.com/cosmos/cosmos-sdk/codec" + storeTypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storeTypes.StoreKey + + authority string + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storeTypes.StoreKey, + authority string, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + authority: authority, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} diff --git a/x/global/keeper/keeper_test.go b/x/global/keeper/keeper_test.go new file mode 100644 index 00000000..7168b86f --- /dev/null +++ b/x/global/keeper/keeper_test.go @@ -0,0 +1,16 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/global/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestGlobalKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} diff --git a/x/global/keeper/msg_server.go b/x/global/keeper/msg_server.go new file mode 100644 index 00000000..fe2f8ba6 --- /dev/null +++ b/x/global/keeper/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/global/types" +) + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. 
+func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/global/keeper/msg_server_update_params.go b/x/global/keeper/msg_server_update_params.go new file mode 100644 index 00000000..ba7d4591 --- /dev/null +++ b/x/global/keeper/msg_server_update_params.go @@ -0,0 +1,29 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + + // Global + "github.com/KYVENetwork/chain/x/global/types" + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +func (k msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + params := k.GetParams(ctx) + + payload := params + _ = json.Unmarshal([]byte(req.Payload), &payload) + k.SetParams(ctx, payload) + + return &types.MsgUpdateParamsResponse{}, nil +} diff --git a/x/global/keeper/msg_server_update_params_test.go b/x/global/keeper/msg_server_update_params_test.go new file mode 100644 index 00000000..c007c60e --- /dev/null +++ b/x/global/keeper/msg_server_update_params_test.go @@ -0,0 +1,629 @@ +package keeper_test + +import ( + "fmt" + + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Global + "github.com/KYVENetwork/chain/x/global/types" + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" +) + +/* + +TEST CASES - msg_server_update_params.go + +* Check default params +* Invalid authority (transaction) +* Invalid authority (proposal) +* Update every param at once +* Update no param +* Update with invalid formatted payload +* Update min gas price +* Update min gas price with invalid value + +* Update burn ratio +* Update burn ratio with invalid value + +* Update gas adjustments +* Update gas adjustments with invalid value + +* Update gas refunds +* Update gas refunds with invalid value + +* Update min initial deposit ratio +* Update min initial deposit ratio with invalid value + +*/ + +var _ = Describe("msg_server_update_params.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + + minDeposit := s.App().GovKeeper.GetDepositParams(s.Ctx()).MinDeposit + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter := sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + + BeforeEach(func() { + s = i.NewCleanChain() + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter = sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Check default params", func() { + // ASSERT + params := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(params.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(params.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(params.GasAdjustments).To(BeNil()) + Expect(params.GasRefunds).To(BeNil()) + Expect(params.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE 
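+ // DUMMY[0] is not the x/gov module account, so the authority check at the top of + // UpdateParams (msg_server_update_params.go above) should reject this message outright.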
+ msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err.Error()).To(Equal( + fmt.Sprintf( + "%s: %s", + fmt.Sprintf("invalid authority; expected %s, got %s", gov, i.DUMMY[0]), + govTypes.ErrInvalidSigner, + ), + )) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, err := s.RunTx(proposal) + + // ASSERT + Expect(err.Error()).To(Equal( + fmt.Sprintf("%s: %s", i.DUMMY[0], govTypes.ErrInvalidSigner), + )) + }) + + It("Update every param at once", func() { + // ARRANGE + payload := `{ + "min_gas_price": "1.5", + "burn_ratio": "0.2", + "gas_adjustments": [{ + "type": "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + "amount": 20 + }], + "gas_refunds": [{ + "type": "/kyve.bundles.v1beta1.MsgSubmitBundleProposal", + "fraction": "0.75" + }], + "min_initial_deposit_ratio": "0.2" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(sdk.MustNewDecFromStr("1.5"))) + Expect(updatedParams.BurnRatio).To(Equal(sdk.MustNewDecFromStr("0.2"))) + Expect(updatedParams.GasAdjustments).To(Equal([]types.GasAdjustment{ + { + Type: "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + Amount: 20, + }, + })) + Expect(updatedParams.GasRefunds).To(Equal([]types.GasRefund{ + { + Type: "/kyve.bundles.v1beta1.MsgSubmitBundleProposal", + Fraction: sdk.MustNewDecFromStr("0.75"), + }, + })) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(sdk.MustNewDecFromStr("0.2"))) + }) + + It("Update no params", func() { + // ARRANGE + payload := `{}` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update with invalid formatted payload", func() { + // ARRANGE + payload := `{ + min_gas_price: "0.5", + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + 
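+ // Presumably (judging from how these integration helpers are used throughout the suite), CommitAfter advances block time past the voting period so any accepted proposal would be tallied in the x/gov EndBlocker; the Commit below then starts a fresh block before the assertions run.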
s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update min gas price", func() { + // ARRANGE + payload := `{ + "min_gas_price": "1.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(sdk.MustNewDecFromStr("1.5"))) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update min gas price with invalid value", func() { + // ARRANGE + payload := `{ + "min_gas_price": "hello" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update burn ratio", func() { + // ARRANGE + payload := `{ + "burn_ratio": "0.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(sdk.MustNewDecFromStr("0.5"))) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update burn ratio with invalid value", func() { + // ARRANGE + payload := `{ + "burn_ratio": "1.1" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, 
minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update gas adjustments", func() { + // ARRANGE + payload := `{ + "gas_adjustments": [{ + "type": "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + "amount": 20 + }] + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(Equal([]types.GasAdjustment{ + { + Type: "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + Amount: 20, + }, + })) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update gas adjustments with invalid value", func() { + // ARRANGE + payload := `{ + "gas_adjustments": [{ + "type": "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + "amount": -20 + }], + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update gas refunds", func() { + // ARRANGE + payload := `{ + "gas_refunds": [{ + "type": "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + "fraction": "0.5" + }] + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + 
Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(Equal([]types.GasRefund{ + { + Type: "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + Fraction: sdk.MustNewDecFromStr("0.5"), + }, + })) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update gas refunds with invalid value", func() { + // ARRANGE + payload := `{ + "gas_refunds": [{ + "type": "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + "fraction": "-1.5" + }] + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) + + It("Update min initial deposit ratio", func() { + // ARRANGE + payload := `{ + "min_initial_deposit_ratio": "0.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(sdk.MustNewDecFromStr("0.5"))) + }) + + It("Update min initial deposit ratio with invalid value", func() { + // ARRANGE + payload := `{ + "min_initial_deposit_ratio": "1.5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().GlobalKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.MinGasPrice).To(Equal(types.DefaultMinGasPrice)) + Expect(updatedParams.BurnRatio).To(Equal(types.DefaultBurnRatio)) + Expect(updatedParams.GasAdjustments).To(BeNil()) + Expect(updatedParams.GasRefunds).To(BeNil()) + Expect(updatedParams.MinInitialDepositRatio).To(Equal(types.DefaultMinInitialDepositRatio)) + }) +}) diff --git a/x/global/module.go b/x/global/module.go new file mode 100644 index 00000000..0e633ec3 --- /dev/null +++ b/x/global/module.go @@ -0,0 +1,169 @@ +package global + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + 
"github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + abci "github.com/tendermint/tendermint/abci/types" + + // Auth + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // Global + "github.com/KYVENetwork/chain/x/global/client/cli" + "github.com/KYVENetwork/chain/x/global/keeper" + "github.com/KYVENetwork/chain/x/global/types" + // Upgrade + upgradeKeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return types.ValidateGenesis(genState) +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + ak authKeeper.AccountKeeper + bk bankKeeper.Keeper + keeper keeper.Keeper + uk upgradeKeeper.Keeper +} + +func NewAppModule( + cdc codec.Codec, + ak authKeeper.AccountKeeper, + bk bankKeeper.Keeper, + keeper keeper.Keeper, + uk upgradeKeeper.Keeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + ak: ak, + bk: bk, + keeper: keeper, + uk: uk, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. 
To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + EndBlocker(ctx, am.ak, am.bk, am.keeper, am.uk) + + return []abci.ValidatorUpdate{} +} diff --git a/x/global/module_test.go b/x/global/module_test.go new file mode 100644 index 00000000..44910640 --- /dev/null +++ b/x/global/module_test.go @@ -0,0 +1,16 @@ +package global_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/global/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestGlobalModule(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Test Suite", types.ModuleName)) +} diff --git a/x/global/post.go b/x/global/post.go new file mode 100644 index 00000000..efc83027 --- /dev/null +++ b/x/global/post.go @@ -0,0 +1,82 @@ +package global + +import ( + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + // Auth + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // FeeGrant + feeGrantKeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + // Global + "github.com/KYVENetwork/chain/x/global/keeper" +) + +// RefundFeeDecorator + +type RefundFeeDecorator struct { + bankKeeper bankKeeper.Keeper + feeGrantKeeper feeGrantKeeper.Keeper + globalKeeper keeper.Keeper +} + +func NewRefundFeeDecorator(bk bankKeeper.Keeper, fk feeGrantKeeper.Keeper, gk keeper.Keeper) RefundFeeDecorator { + return RefundFeeDecorator{ + bankKeeper: bk, + feeGrantKeeper: fk, + globalKeeper: gk, + } +} + +func (rfd RefundFeeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { + // Ensure that this is a fee transaction. + feeTx, ok := tx.(sdk.FeeTx) + if !ok { + return ctx, sdkErrors.Wrap(errorsTypes.ErrTxDecode, "Tx must be a FeeTx") + } + + // Return early if the transaction fee is zero (nothing to refund) + // or there are more than one message (can't refund). + fee := feeTx.GetFee() + msgs := feeTx.GetMsgs() + if fee.IsZero() || len(msgs) != 1 { + return next(ctx, tx, simulate) + } + + // Find the refund percentage based on the transaction message type. + refundPercentage := sdk.ZeroDec() + gasRefunds := rfd.globalKeeper.GetGasRefunds(ctx) + for _, refund := range gasRefunds { + if sdk.MsgTypeURL(msgs[0]) == refund.Type { + refundPercentage = refund.Fraction + break + } + } + + // Return early if the refund percentage is zero. + if refundPercentage.IsZero() { + return next(ctx, tx, simulate) + } + + // Calculate the refund amount. + refund := sdk.NewCoins() + for _, coin := range fee { + amount := sdk.NewDecFromInt(coin.Amount).Mul(refundPercentage) + refund = refund.Add(sdk.NewCoin(coin.Denom, amount.TruncateInt())) + } + + // Send the refund back to this transaction's fee payer. 
+ account, err := GetFeeAccount(ctx, feeTx, rfd.feeGrantKeeper) + if err != nil { + return ctx, err + } + err = rfd.bankKeeper.SendCoinsFromModuleToAccount(ctx, authTypes.FeeCollectorName, account, refund) + if err != nil { + return ctx, err + } + + return next(ctx, tx, simulate) +} diff --git a/x/global/post_test.go b/x/global/post_test.go new file mode 100644 index 00000000..59932b93 --- /dev/null +++ b/x/global/post_test.go @@ -0,0 +1,226 @@ +package global_test + +import ( + "cosmossdk.io/math" + i "github.com/KYVENetwork/chain/testutil/integration" + bundlesTypes "github.com/KYVENetwork/chain/x/bundles/types" + stakersTypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Global + "github.com/KYVENetwork/chain/x/global" + "github.com/KYVENetwork/chain/x/global/types" +) + +/* + +TEST CASES - RefundFeeDecorator + +* Non-refundable message +* Refund 0% +* Refund 10% +* Refund 2/3 % +* Refund 100% +* Don't refund multiple + +*/ + +var _ = Describe("RefundFeeDecorator", Ordered, func() { + s := i.NewCleanChain() + encodingConfig := BuildEncodingConfig() + rfd := global.NewRefundFeeDecorator(s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper) + dfd := global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + denom := s.App().StakingKeeper.BondDenom(s.Ctx()) + + accountBalanceBefore := s.GetBalanceFromAddress(i.DUMMY[0]) + + BeforeEach(func() { + s = i.NewCleanChain() + + accountBalanceBefore = s.GetBalanceFromAddress(i.DUMMY[0]) + rfd = global.NewRefundFeeDecorator(s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper) + dfd = global.NewDeductFeeDecorator(s.App().AccountKeeper, s.App().BankKeeper, s.App().FeeGrantKeeper, s.App().GlobalKeeper, s.App().StakingKeeper) + + denom = s.App().StakingKeeper.BondDenom(s.Ctx()) + + params := types.DefaultParams() + params.GasRefunds = []types.GasRefund{ + { + Type: "/kyve.bundles.v1beta1.MsgSubmitBundleProposal", + Fraction: sdk.NewDec(1).QuoInt64(10), + }, + { + Type: "/kyve.bundles.v1beta1.MsgVoteBundleProposal", + Fraction: sdk.OneDec(), + }, + { + Type: "/kyve.bundles.v1beta1.MsgSkipUploaderRole", + Fraction: sdk.ZeroDec(), + }, + { + Type: "/kyve.stakers.v1beta1.MsgCreateStaker", + Fraction: sdk.NewDec(2).QuoInt64(3), + }, + } + s.App().GlobalKeeper.SetParams(s.Ctx(), params) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Non-refundable message", func() { + // ARRANGE + msg := bundlesTypes.MsgClaimUploaderRole{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceAfter).To(Equal(uint64(200_000))) + }) + + It("Refund 0%", func() { + // ARRANGE + 
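+ // MsgSkipUploaderRole is configured with a refund fraction of zero in BeforeEach, so the + // full fee should remain with the fee collector, exactly as in the non-refundable case.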
msg := bundlesTypes.MsgSkipUploaderRole{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceAfter).To(Equal(uint64(200_000))) + }) + + It("Refund 10%", func() { + // ARRANGE + msg := bundlesTypes.MsgSubmitBundleProposal{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 180_000)) + Expect(collectorBalanceAfter).To(Equal(uint64(180_000))) + }) + + It("Refund 2/3 %", func() { + // ARRANGE + msg := stakersTypes.MsgCreateStaker{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + (66_667))) + Expect(collectorBalanceAfter).To(Equal(uint64(66_667))) + }) + + It("Refund 100%", func() { + // ARRANGE + msg := bundlesTypes.MsgVoteBundleProposal{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter)) + Expect(collectorBalanceAfter).To(Equal(uint64(0))) + }) + + 
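+ // Worked example of the refund arithmetic asserted above (a sketch mirroring the Mul/TruncateInt + // logic in post.go): fee = gasPrice * gasLimit = 1 * 200_000 = 200_000. A fraction of 1/10 refunds + // 20_000 (the collector keeps 180_000), 2/3 refunds trunc(133_333.33) = 133_333 (the collector + // keeps 66_667), and a fraction of 1 refunds the full 200_000.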
It("Don't refund multiple", func() { + // ARRANGE + msg1 := bundlesTypes.MsgVoteBundleProposal{Creator: i.ALICE} + msg2 := bundlesTypes.MsgSkipUploaderRole{Creator: i.ALICE} + msg3 := stakersTypes.MsgJoinPool{Creator: i.ALICE} + txBuilder := encodingConfig.TxConfig.NewTxBuilder() + gasLimit := uint64(200_000) + txBuilder.SetGasLimit(gasLimit) + fees := sdk.NewCoins(sdk.NewCoin(denom, math.NewInt(1).MulRaw(int64(gasLimit)))) + txBuilder.SetFeeAmount(fees) + _ = txBuilder.SetMsgs(&msg1, &msg2, &msg3) + tx := txBuilder.GetTx() + + // ACT + _, errAnte := dfd.AnteHandle(s.Ctx(), tx, false, NextFn) + _, errPost := rfd.AnteHandle(s.Ctx(), tx, false, NextFn) + + // ASSERT + accountBalanceAfter := s.GetBalanceFromAddress(i.ALICE) + collectorBalanceAfter := s.GetBalanceFromModule(authTypes.FeeCollectorName) + + Expect(errAnte).Should(Not(HaveOccurred())) + Expect(errPost).Should(Not(HaveOccurred())) + Expect(accountBalanceBefore).To(Equal(accountBalanceAfter + 200_000)) + Expect(collectorBalanceAfter).To(Equal(uint64(200_000))) + }) +}) diff --git a/x/global/types/codec.go b/x/global/types/codec.go new file mode 100644 index 00000000..4e55f946 --- /dev/null +++ b/x/global/types/codec.go @@ -0,0 +1,18 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateParams{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/global/types/genesis.go b/x/global/types/genesis.go new file mode 100644 index 00000000..f86b87f5 --- /dev/null +++ b/x/global/types/genesis.go @@ -0,0 +1,20 @@ +package types + +// NewGenesisState creates a new GenesisState object +func NewGenesisState(params Params) *GenesisState { + return &GenesisState{ + Params: params, + } +} + +// DefaultGenesisState creates a default GenesisState object +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// ValidateGenesis validates the provided genesis state to ensure the expected invariants holds. +func ValidateGenesis(data GenesisState) error { + return data.Params.Validate() +} diff --git a/x/global/types/genesis.pb.go b/x/global/types/genesis.pb.go new file mode 100644 index 00000000..dbb03001 --- /dev/null +++ b/x/global/types/genesis.pb.go @@ -0,0 +1,323 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/global/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the global module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_c35b7ff881baba68, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.global.v1beta1.GenesisState") +} + +func init() { proto.RegisterFile("kyve/global/v1beta1/genesis.proto", fileDescriptor_c35b7ff881baba68) } + +var fileDescriptor_c35b7ff881baba68 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcc, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0xcf, 0xc9, 0x4f, 0x4a, 0xcc, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, + 0x29, 0xd1, 0x83, 0x28, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, + 0x83, 0x58, 0x10, 0xa5, 0x52, 0x0a, 0x58, 0x4d, 0x83, 0xe8, 0x04, 0xab, 0x50, 0xf2, 0xe4, 0xe2, + 0x71, 0x87, 0x98, 0x1e, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0x64, 0xc9, 0xc5, 0x56, 0x90, 0x58, 0x94, + 0x98, 0x5b, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xad, 0x87, 0xc5, 0x36, 0xbd, 0x00, + 0xb0, 0x12, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xa0, 0x1a, 0x9c, 0x5c, 0x4f, 0x3c, 0x92, + 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, + 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0x3b, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, + 0x39, 0x3f, 0x57, 0xdf, 0x3b, 0x32, 0xcc, 0xd5, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x5b, 0x3f, + 0x39, 0x23, 0x31, 0x33, 0x4f, 0xbf, 0x02, 0xe6, 0xc0, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, + 0xb0, 0xc3, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x72, 0xf3, 0x92, 0x32, 0x0a, 0x01, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) 
+ base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/global/types/global.pb.go b/x/global/types/global.pb.go new file mode 100644 index 00000000..7d68e674 --- /dev/null +++ b/x/global/types/global.pb.go @@ -0,0 +1,990 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/global/v1beta1/global.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the global module parameters. +type Params struct { + // min_gas_price defines the minimum gas price value for all transactions. + MinGasPrice github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,1,opt,name=min_gas_price,json=minGasPrice,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"min_gas_price"` + // burn_ratio defines the ratio of transaction fees burnt. + BurnRatio github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,2,opt,name=burn_ratio,json=burnRatio,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"burn_ratio"` + // gas_adjustments can add a constant amount of gas to a specific message type. + // This gives more control to make certain messages more expensive to avoid spamming + // of certain types of messages. + GasAdjustments []GasAdjustment `protobuf:"bytes,3,rep,name=gas_adjustments,json=gasAdjustments,proto3" json:"gas_adjustments"` + // gas_refunds lets the governance specify a fraction of how much gas + // a user gets refunded for a certain type of transaction. + // This could be used to make transactions which support to network cheaper. + // Gas refunds only work if the transaction only included one message. + GasRefunds []GasRefund `protobuf:"bytes,4,rep,name=gas_refunds,json=gasRefunds,proto3" json:"gas_refunds"` + // min_initial_deposit_ratio sets a minimum fraction of initial deposit for a + // governance proposal. This is used to avoid spamming of proposals and + // polluting the proposals page. 
+ MinInitialDepositRatio github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,5,opt,name=min_initial_deposit_ratio,json=minInitialDepositRatio,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"min_initial_deposit_ratio"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_d1b5d4c0bbdf8bfb, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetGasAdjustments() []GasAdjustment { + if m != nil { + return m.GasAdjustments + } + return nil +} + +func (m *Params) GetGasRefunds() []GasRefund { + if m != nil { + return m.GasRefunds + } + return nil +} + +// GasAdjustment stores for every message type a fixed amount +// of gas which is added to the message +type GasAdjustment struct { + // type of the sdk-message + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // amount of gas which is added to the message + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *GasAdjustment) Reset() { *m = GasAdjustment{} } +func (m *GasAdjustment) String() string { return proto.CompactTextString(m) } +func (*GasAdjustment) ProtoMessage() {} +func (*GasAdjustment) Descriptor() ([]byte, []int) { + return fileDescriptor_d1b5d4c0bbdf8bfb, []int{1} +} +func (m *GasAdjustment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GasAdjustment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GasAdjustment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GasAdjustment) XXX_Merge(src proto.Message) { + xxx_messageInfo_GasAdjustment.Merge(m, src) +} +func (m *GasAdjustment) XXX_Size() int { + return m.Size() +} +func (m *GasAdjustment) XXX_DiscardUnknown() { + xxx_messageInfo_GasAdjustment.DiscardUnknown(m) +} + +var xxx_messageInfo_GasAdjustment proto.InternalMessageInfo + +func (m *GasAdjustment) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *GasAdjustment) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// GasRefund stores the fraction of gas which will be refunded for a given +// type of message. +// This only works if the transaction only includes one message. 
+type GasRefund struct { + // type of the sdk-message + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // fraction in decimal representation between 0 and 1 + Fraction github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,2,opt,name=fraction,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"fraction"` +} + +func (m *GasRefund) Reset() { *m = GasRefund{} } +func (m *GasRefund) String() string { return proto.CompactTextString(m) } +func (*GasRefund) ProtoMessage() {} +func (*GasRefund) Descriptor() ([]byte, []int) { + return fileDescriptor_d1b5d4c0bbdf8bfb, []int{2} +} +func (m *GasRefund) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GasRefund) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GasRefund.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GasRefund) XXX_Merge(src proto.Message) { + xxx_messageInfo_GasRefund.Merge(m, src) +} +func (m *GasRefund) XXX_Size() int { + return m.Size() +} +func (m *GasRefund) XXX_DiscardUnknown() { + xxx_messageInfo_GasRefund.DiscardUnknown(m) +} + +var xxx_messageInfo_GasRefund proto.InternalMessageInfo + +func (m *GasRefund) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func init() { + proto.RegisterType((*Params)(nil), "kyve.global.v1beta1.Params") + proto.RegisterType((*GasAdjustment)(nil), "kyve.global.v1beta1.GasAdjustment") + proto.RegisterType((*GasRefund)(nil), "kyve.global.v1beta1.GasRefund") +} + +func init() { proto.RegisterFile("kyve/global/v1beta1/global.proto", fileDescriptor_d1b5d4c0bbdf8bfb) } + +var fileDescriptor_d1b5d4c0bbdf8bfb = []byte{ + // 408 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0xce, 0xd2, 0x40, + 0x10, 0xc7, 0x5b, 0xa9, 0x44, 0x96, 0x7c, 0x9a, 0xac, 0xe6, 0x4b, 0xf5, 0x50, 0x48, 0x0f, 0x86, + 0xc4, 0xd8, 0x06, 0x3d, 0x7a, 0x92, 0x40, 0x88, 0x1a, 0x0d, 0xf6, 0x60, 0xa2, 0x97, 0x66, 0xdb, + 0x2e, 0x65, 0x2d, 0xbb, 0xdb, 0xec, 0x6e, 0x51, 0xde, 0xc2, 0x47, 0xf1, 0x31, 0x38, 0x72, 0x34, + 0x1e, 0x88, 0x81, 0x17, 0x31, 0xbb, 0x2d, 0x04, 0x13, 0xbc, 0x70, 0xea, 0xcc, 0xf4, 0x3f, 0xbf, + 0xf6, 0x3f, 0x33, 0xa0, 0x5f, 0xac, 0x57, 0x38, 0xcc, 0x97, 0x3c, 0x41, 0xcb, 0x70, 0x35, 0x4c, + 0xb0, 0x42, 0xc3, 0x26, 0x0d, 0x4a, 0xc1, 0x15, 0x87, 0x0f, 0xb5, 0x22, 0x68, 0x4a, 0x8d, 0xe2, + 0xc9, 0xa3, 0x9c, 0xe7, 0xdc, 0xbc, 0x0f, 0x75, 0x54, 0x4b, 0xfd, 0x9f, 0x2d, 0xd0, 0x9e, 0x21, + 0x81, 0xa8, 0x84, 0x11, 0xb8, 0xa1, 0x84, 0xc5, 0x39, 0x92, 0x71, 0x29, 0x48, 0x8a, 0x5d, 0xbb, + 0x6f, 0x0f, 0x3a, 0xa3, 0x60, 0xb3, 0xeb, 0x59, 0xbf, 0x77, 0xbd, 0xa7, 0x39, 0x51, 0x8b, 0x2a, + 0x09, 0x52, 0x4e, 0xc3, 0x94, 0x4b, 0xca, 0x65, 0xf3, 0x78, 0x2e, 0xb3, 0x22, 0x54, 0xeb, 0x12, + 0xcb, 0x60, 0x8c, 0xd3, 0xa8, 0x4b, 0x09, 0x9b, 0x22, 0x39, 0xd3, 0x08, 0xf8, 0x1e, 0x80, 0xa4, + 0x12, 0x2c, 0x16, 0x48, 0x11, 0xee, 0xde, 0xb9, 0x0a, 0xd8, 0xd1, 0x84, 0x48, 0x03, 0xe0, 0x47, + 0xf0, 0x40, 0xff, 0x1e, 0xca, 0xbe, 0x56, 0x52, 0x51, 0xcc, 0x94, 0x74, 0x5b, 0xfd, 0xd6, 0xa0, + 0xfb, 0xc2, 0x0f, 0x2e, 0x58, 0x0e, 0xa6, 0x48, 0xbe, 0x3e, 0x49, 0x47, 0x8e, 0xfe, 0x6e, 0x74, + 0x3f, 0x3f, 0x2f, 0x4a, 0x38, 0x01, 0x5d, 0x8d, 0x14, 0x78, 0x5e, 0xb1, 0x4c, 0xba, 0x8e, 0xc1, + 0x79, 0xff, 0xc3, 0x45, 0x46, 0xd6, 0xa0, 0x40, 0x7e, 0x2c, 0x48, 0x48, 0xc0, 0x63, 0x3d, 0x3c, + 0xc2, 0x88, 0x22, 0x68, 0x19, 0x67, 
0xb8, 0xe4, 0x92, 0xa8, 0xc6, 0xf7, 0xdd, 0xab, 0x7c, 0xdf, + 0x52, 0xc2, 0xde, 0xd4, 0xbc, 0x71, 0x8d, 0x33, 0x43, 0xf0, 0x5f, 0x81, 0x9b, 0x7f, 0x8c, 0x41, + 0x08, 0x1c, 0xdd, 0x55, 0xef, 0x2b, 0x32, 0x31, 0xbc, 0x05, 0x6d, 0x44, 0x79, 0xc5, 0x94, 0x19, + 0xba, 0x13, 0x35, 0x99, 0x5f, 0x80, 0xce, 0xc9, 0xc6, 0xc5, 0xc6, 0xb7, 0xe0, 0xde, 0x5c, 0xa0, + 0x54, 0x11, 0xce, 0xae, 0xdc, 0xd7, 0xa9, 0x7f, 0x34, 0xd9, 0xec, 0x3d, 0x7b, 0xbb, 0xf7, 0xec, + 0x3f, 0x7b, 0xcf, 0xfe, 0x71, 0xf0, 0xac, 0xed, 0xc1, 0xb3, 0x7e, 0x1d, 0x3c, 0xeb, 0xcb, 0xb3, + 0x33, 0xd6, 0xbb, 0xcf, 0x9f, 0x26, 0x1f, 0xb0, 0xfa, 0xc6, 0x45, 0x11, 0xa6, 0x0b, 0x44, 0x58, + 0xf8, 0xfd, 0x78, 0xdd, 0x06, 0x9a, 0xb4, 0xcd, 0xa9, 0xbe, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x49, 0xa4, 0x7a, 0xa9, 0xf9, 0x02, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.MinInitialDepositRatio.Size() + i -= size + if _, err := m.MinInitialDepositRatio.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.GasRefunds) > 0 { + for iNdEx := len(m.GasRefunds) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.GasRefunds[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.GasAdjustments) > 0 { + for iNdEx := len(m.GasAdjustments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.GasAdjustments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size := m.BurnRatio.Size() + i -= size + if _, err := m.BurnRatio.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MinGasPrice.Size() + i -= size + if _, err := m.MinGasPrice.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GasAdjustment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GasAdjustment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GasAdjustment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintGlobal(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGlobal(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GasRefund) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GasRefund) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GasRefund) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Fraction.Size() + i -= size + if _, err := m.Fraction.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintGlobal(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGlobal(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGlobal(dAtA []byte, offset int, v uint64) int { + offset -= sovGlobal(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MinGasPrice.Size() + n += 1 + l + sovGlobal(uint64(l)) + l = m.BurnRatio.Size() + n += 1 + l + sovGlobal(uint64(l)) + if len(m.GasAdjustments) > 0 { + for _, e := range m.GasAdjustments { + l = e.Size() + n += 1 + l + sovGlobal(uint64(l)) + } + } + if len(m.GasRefunds) > 0 { + for _, e := range m.GasRefunds { + l = e.Size() + n += 1 + l + sovGlobal(uint64(l)) + } + } + l = m.MinInitialDepositRatio.Size() + n += 1 + l + sovGlobal(uint64(l)) + return n +} + +func (m *GasAdjustment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovGlobal(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovGlobal(uint64(m.Amount)) + } + return n +} + +func (m *GasRefund) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovGlobal(uint64(l)) + } + l = m.Fraction.Size() + n += 1 + l + sovGlobal(uint64(l)) + return n +} + +func sovGlobal(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGlobal(x uint64) (n int) { + return sovGlobal(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinGasPrice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinGasPrice.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BurnRatio", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BurnRatio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GasAdjustments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GasAdjustments = append(m.GasAdjustments, GasAdjustment{}) + if err := m.GasAdjustments[len(m.GasAdjustments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GasRefunds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GasRefunds = append(m.GasRefunds, GasRefund{}) + if err := m.GasRefunds[len(m.GasRefunds)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinInitialDepositRatio", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MinInitialDepositRatio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGlobal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGlobal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GasAdjustment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { 
+ break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GasAdjustment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GasAdjustment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGlobal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGlobal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GasRefund) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GasRefund: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GasRefund: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fraction", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGlobal + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGlobal + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGlobal + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Fraction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGlobal(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGlobal + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGlobal(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGlobal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGlobal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGlobal + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGlobal + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGlobal + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGlobal + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGlobal = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGlobal = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGlobal = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/global/types/keys.go b/x/global/types/keys.go new file mode 100644 index 00000000..c4bb4dad --- /dev/null +++ b/x/global/types/keys.go @@ -0,0 +1,17 @@ +package types + +const ( + // ModuleName defines the module name + ModuleName = "global" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_global" +) + +var ParamsKey = []byte{0x00} diff --git a/x/global/types/msgs.go b/x/global/types/msgs.go new file mode 100644 index 00000000..74c48a95 --- /dev/null +++ b/x/global/types/msgs.go @@ -0,0 +1,34 @@ +package types + +import ( + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var _ sdk.Msg = &MsgUpdateParams{} + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (msg *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
+func (msg *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + params := DefaultParams() + if err := json.Unmarshal([]byte(msg.Payload), ¶ms); err != nil { + return err + } + if err := params.Validate(); err != nil { + return err + } + + return nil +} diff --git a/x/global/types/params.go b/x/global/types/params.go new file mode 100644 index 00000000..a376e708 --- /dev/null +++ b/x/global/types/params.go @@ -0,0 +1,172 @@ +package types + +import ( + "fmt" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// DefaultMinGasPrice is 0 (i.e. disabled) +var DefaultMinGasPrice = sdk.NewDec(0) + +// DefaultBurnRatio is 0% (i.e. disabled) +var DefaultBurnRatio = sdk.NewDec(0) + +// DefaultMinInitialDepositRatio is 0% (i.e. disabled) +var DefaultMinInitialDepositRatio = sdk.NewDec(0) + +// NewParams creates a new Params instance +func NewParams(minGasPrice sdk.Dec, burnRatio sdk.Dec, gasAdjustments []GasAdjustment, gasRefunds []GasRefund, minInitialDepositRatio sdk.Dec) Params { + return Params{ + MinGasPrice: minGasPrice, + BurnRatio: burnRatio, + GasAdjustments: gasAdjustments, + GasRefunds: gasRefunds, + MinInitialDepositRatio: minInitialDepositRatio, + } +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams(DefaultMinGasPrice, DefaultBurnRatio, []GasAdjustment{}, []GasRefund{}, DefaultMinInitialDepositRatio) +} + +// Validate validates the set of params +func (p Params) Validate() error { + if err := validateMinGasPrice(p.MinGasPrice); err != nil { + return err + } + + if err := validateBurnRatio(p.BurnRatio); err != nil { + return err + } + + for _, gasAdjustment := range p.GasAdjustments { + if err := validateGasAdjustment(gasAdjustment); err != nil { + return err + } + } + + for _, gasRefund := range p.GasRefunds { + if err := validateGasRefund(gasRefund); err != nil { + return err + } + } + + if err := validateMinInitialDepositRatio(p.MinInitialDepositRatio); err != nil { + return err + } + + return nil +} + +// validateMinGasPrice ... +func validateMinGasPrice(i interface{}) error { + v, ok := i.(sdk.Dec) + + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNil() { + return fmt.Errorf("invalid parameter: nil") + } + + if v.IsNegative() { + return fmt.Errorf("value cannot be negative: %s", i) + } + + return nil +} + +// validateBurnRatio ... +func validateBurnRatio(i interface{}) error { + v, ok := i.(sdk.Dec) + + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNil() { + return fmt.Errorf("invalid parameter: nil") + } + + if v.IsNegative() { + return fmt.Errorf("value cannot be negative: %s", i) + } + + if v.GT(sdk.OneDec()) { + return fmt.Errorf("value cannot be greater than 1: %s", v) + } + + return nil +} + +// validateGasAdjustment ... +func validateGasAdjustment(i interface{}) error { + v, ok := i.(GasAdjustment) + + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + amount := math.NewInt(int64(v.Amount)) + + if amount.IsNil() { + return fmt.Errorf("invalid parameter: nil") + } + + if amount.IsNegative() { + return fmt.Errorf("value cannot be negative: %s", amount) + } + + return nil +} + +// validateGasRefund ... 
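Since ValidateBasic above unmarshals the JSON payload on top of DefaultParams() and then calls Validate, a payload only needs to contain the parameters it actually changes. A minimal sketch of building and sanity-checking such a message; the JSON keys follow the json tags of the generated Params struct, and the authority is derived with the SDK's default bech32 prefix (on-chain it would be the kyve-prefixed x/gov module account):

package main

import (
	"fmt"

	authTypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	govTypes "github.com/cosmos/cosmos-sdk/x/gov/types"

	"github.com/KYVENetwork/chain/x/global/types"
)

func main() {
	// Only min_gas_price and burn_ratio are overridden; all other
	// parameters keep their defaults because the payload is unmarshaled
	// on top of DefaultParams().
	msg := types.MsgUpdateParams{
		Authority: authTypes.NewModuleAddress(govTypes.ModuleName).String(),
		Payload:   `{"min_gas_price":"0.02","burn_ratio":"0.1"}`,
	}

	if err := msg.ValidateBasic(); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("payload is well-formed")
}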
+func validateGasRefund(i interface{}) error { + v, ok := i.(GasRefund) + + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.Fraction.IsNil() { + return fmt.Errorf("invalid parameter: nil") + } + + if v.Fraction.IsNegative() { + return fmt.Errorf("value cannot be negative: %s", v.Fraction) + } + + if v.Fraction.GT(sdk.OneDec()) { + return fmt.Errorf("value cannot be greater than 1: %s", v.Fraction) + } + + return nil +} + +// validateMinInitialDepositRatio ... +func validateMinInitialDepositRatio(i interface{}) error { + v, ok := i.(sdk.Dec) + + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + if v.IsNil() { + return fmt.Errorf("invalid parameter: nil") + } + + if v.IsNegative() { + return fmt.Errorf("value cannot be negative: %s", i) + } + + if v.GT(sdk.OneDec()) { + return fmt.Errorf("value cannot be greater than 1: %s", v) + } + + return nil +} diff --git a/x/global/types/query.pb.go b/x/global/types/query.pb.go new file mode 100644 index 00000000..145c8c8a --- /dev/null +++ b/x/global/types/query.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/global/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_117f917a03a4039c, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_117f917a03a4039c, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "kyve.global.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "kyve.global.v1beta1.QueryParamsResponse") +} + +func init() { proto.RegisterFile("kyve/global/v1beta1/query.proto", fileDescriptor_117f917a03a4039c) } + +var fileDescriptor_117f917a03a4039c = []byte{ + // 291 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0xcf, 0xc9, 0x4f, 0x4a, 0xcc, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, + 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0x29, 0xd0, + 0x83, 0x28, 0xd0, 0x83, 0x2a, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, 0x83, 0x58, + 0x10, 0xa5, 0x52, 0x32, 0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x89, 0x05, 0x99, 0xfa, 0x89, + 0x79, 0x79, 0xf9, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xc5, 0x50, 0x59, 0x05, 0x6c, 0x36, 0x41, + 0xcd, 0x05, 0xab, 0x50, 0x12, 0xe1, 0x12, 0x0a, 0x04, 0xd9, 0x1c, 0x90, 0x58, 0x94, 0x98, 0x5b, + 0x1c, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xa2, 0x14, 0xc0, 0x25, 0x8c, 0x22, 0x5a, 0x5c, 0x90, + 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc9, 0xc5, 0x56, 0x00, 0x16, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, + 0x36, 0x92, 0xd6, 0xc3, 0xe2, 0x50, 0x3d, 0x88, 0x26, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, + 0xa0, 0x1a, 0x8c, 0xba, 0x18, 0xb9, 0x58, 0xc1, 0x46, 0x0a, 0x35, 0x30, 0x72, 0xb1, 0x41, 0x94, + 0x08, 0xa9, 0x63, 0xd5, 0x8f, 0xe9, 0x1e, 0x29, 0x0d, 0xc2, 0x0a, 0x21, 0x4e, 0x54, 0x52, 0x6e, + 0xba, 0xfc, 0x64, 0x32, 0x93, 0xac, 0x90, 0xb4, 0x3e, 0x36, 0xaf, 0x43, 0x1c, 0xe3, 0xe4, 0x7a, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, + 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, + 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xde, 0x91, 0x61, 0xae, 0x7e, 0xa9, 0x25, 0xe5, 0xf9, 0x45, + 0xd9, 0xfa, 0xc9, 0x19, 0x89, 0x99, 0x79, 0xfa, 0x15, 0x30, 0xf3, 0x4a, 0x2a, 0x0b, 0x52, 0x8b, + 0x93, 0xd8, 0xc0, 0x41, 0x68, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x44, 0x21, 0xc5, 0xfb, 0xd0, + 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
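The Query service defined in this file exposes a single Params RPC; the gRPC client and the REST registration follow further below, where GET /kyve/global/v1beta1/params is wired up. A minimal sketch of reading the parameters over gRPC, assuming a locally running node; the endpoint address and port are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/KYVENetwork/chain/x/global/types"
)

func main() {
	// Placeholder endpoint; 9090 is the conventional Cosmos gRPC port.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	res, err := types.NewQueryClient(conn).Params(context.Background(), &types.QueryParamsRequest{})
	if err != nil {
		log.Fatal(err)
	}
	// The same response is served over REST at /kyve/global/v1beta1/params.
	fmt.Println(res.Params.MinGasPrice, res.Params.BurnRatio)
}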
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.global.v1beta1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.global.v1beta1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.global.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/global/v1beta1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/global/types/query.pb.gw.go b/x/global/types/query.pb.gw.go new file mode 100644 index 00000000..79a454e2 --- /dev/null +++ b/x/global/types/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/global/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "global", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/x/global/types/tx.pb.go b/x/global/types/tx.pb.go new file mode 100644 index 00000000..f7f9c33b --- /dev/null +++ b/x/global/types/tx.pb.go @@ -0,0 +1,588 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/global/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams defines a SDK message for updating the module parameters. 
+type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // payload defines the x/global parameters to update. + Payload string `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_69d8ea894bb09a0e, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_69d8ea894bb09a0e, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "kyve.global.v1beta1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "kyve.global.v1beta1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("kyve/global/v1beta1/tx.proto", fileDescriptor_69d8ea894bb09a0e) } + +var fileDescriptor_69d8ea894bb09a0e = []byte{ + // 274 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xc9, 0xae, 0x2c, 0x4b, + 0xd5, 0x4f, 0xcf, 0xc9, 0x4f, 0x4a, 0xcc, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, + 0x2f, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0xc9, 0xea, 0x41, 0x64, 0xf5, + 0xa0, 
0xb2, 0x52, 0x92, 0xc9, 0xf9, 0xc5, 0xb9, 0xf9, 0xc5, 0xf1, 0x60, 0x25, 0xfa, 0x10, 0x0e, + 0x44, 0xbd, 0x52, 0x32, 0x17, 0xbf, 0x6f, 0x71, 0x7a, 0x68, 0x41, 0x4a, 0x62, 0x49, 0x6a, 0x40, + 0x62, 0x51, 0x62, 0x6e, 0xb1, 0x90, 0x19, 0x17, 0x67, 0x62, 0x69, 0x49, 0x46, 0x7e, 0x51, 0x66, + 0x49, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xc4, 0xa5, 0x2d, 0xba, 0x22, 0x50, 0x7d, + 0x8e, 0x29, 0x29, 0x45, 0xa9, 0xc5, 0xc5, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x41, 0x08, 0xa5, + 0x42, 0x12, 0x5c, 0xec, 0x05, 0x89, 0x95, 0x39, 0xf9, 0x89, 0x29, 0x12, 0x4c, 0x20, 0x5d, 0x41, + 0x30, 0xae, 0x92, 0x24, 0x97, 0x38, 0x9a, 0x25, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, + 0x46, 0x99, 0x5c, 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0x49, 0x5c, 0x3c, 0x28, 0x6e, 0x50, 0xd1, 0xc3, + 0xe2, 0x0f, 0x3d, 0x34, 0x43, 0xa4, 0x74, 0x88, 0x51, 0x05, 0xb3, 0xca, 0xc9, 0xf5, 0xc4, 0x23, + 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, + 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xb4, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, + 0x92, 0xf3, 0x73, 0xf5, 0xbd, 0x23, 0xc3, 0x5c, 0xfd, 0x52, 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xf5, + 0x93, 0x33, 0x12, 0x33, 0xf3, 0xf4, 0x2b, 0x60, 0x81, 0x5d, 0x52, 0x59, 0x90, 0x5a, 0x9c, 0xc4, + 0x06, 0x0e, 0x38, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbe, 0x62, 0xf8, 0x18, 0x88, 0x01, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams defines a governance operation for updating the x/global + // module parameters. The authority is hard-coded to the x/gov module + // account. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.global.v1beta1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // UpdateParams defines a governance operation for updating the x/global + // module parameters. The authority is hard-coded to the x/gov module + // account. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
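The Msg service above only routes UpdateParams; the keeper-side handler itself is not part of this hunk. As a purely hypothetical sketch of the usual flow (authority check, JSON merge over the stored params, validation, write-back), with an in-memory stand-in for the keeper:

package main

import (
	"encoding/json"
	"fmt"

	authTypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	govTypes "github.com/cosmos/cosmos-sdk/x/gov/types"

	"github.com/KYVENetwork/chain/x/global/types"
)

// paramsStore is an in-memory stand-in for the module keeper; it is not
// part of this patch and only exists to make the flow runnable.
type paramsStore struct {
	authority string
	params    types.Params
}

func (s *paramsStore) updateParams(msg *types.MsgUpdateParams) error {
	// Only the governance authority may update the parameters.
	if msg.Authority != s.authority {
		return fmt.Errorf("invalid authority: expected %s, got %s", s.authority, msg.Authority)
	}
	// Merge the (possibly partial) payload over the stored parameters.
	params := s.params
	if err := json.Unmarshal([]byte(msg.Payload), &params); err != nil {
		return err
	}
	if err := params.Validate(); err != nil {
		return err
	}
	s.params = params
	return nil
}

func main() {
	gov := authTypes.NewModuleAddress(govTypes.ModuleName).String()
	s := &paramsStore{authority: gov, params: types.DefaultParams()}

	err := s.updateParams(&types.MsgUpdateParams{Authority: gov, Payload: `{"burn_ratio":"0.25"}`})
	fmt.Println(err, s.params.BurnRatio)
}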
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.global.v1beta1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.global.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/global/v1beta1/tx.proto", +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTx(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/global/types/types.go b/x/global/types/types.go new file mode 100644 index 00000000..1e1741b9 --- /dev/null +++ b/x/global/types/types.go @@ -0,0 +1,3 @@ +package types + +var Denom = "tkyve" diff --git a/x/global/utils.go b/x/global/utils.go new file mode 100644 index 00000000..06e25a20 --- /dev/null +++ b/x/global/utils.go @@ -0,0 +1,103 @@ +package global + +import ( + "math" + + sdkErrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + // Auth + "github.com/cosmos/cosmos-sdk/x/auth/ante" + // FeeGrant + feeGrantKeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + // Global + "github.com/KYVENetwork/chain/x/global/keeper" + // Staking + stakingKeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" +) + +func GetFeeAccount(ctx sdk.Context, tx sdk.FeeTx, feeGrantKeeper feeGrantKeeper.Keeper) (sdk.AccAddress, error) { + fee := tx.GetFee() + feePayer := tx.FeePayer() + feeGranter := tx.FeeGranter() + + account := feePayer + if feeGranter != nil && !feeGranter.Equals(feePayer) { + err := feeGrantKeeper.UseGrantedFees(ctx, feeGranter, feePayer, fee, tx.GetMsgs()) + if err != nil { + return nil, sdkErrors.Wrapf(err, "%s does not not allow to pay fees for %s", feeGranter, feePayer) + } + + account = feeGranter + } + + return account, nil +} + +// BuildTxFeeChecker ensures that the configured minimum gas price is met. +// In contrast to +// https://github.com/cosmos/cosmos-sdk/blob/release/v0.46.x/x/auth/ante/validator_tx_fee.go#L12 +// this code runs within the consensus layer. 
+func BuildTxFeeChecker(ctx sdk.Context, fk keeper.Keeper, sk stakingKeeper.Keeper) ante.TxFeeChecker { + consensusMinGasPrices := sdk.NewDecCoins(sdk.NewDecCoinFromDec(sk.BondDenom(ctx), fk.GetMinGasPrice(ctx))) + + return func(ctx sdk.Context, tx sdk.Tx) (sdk.Coins, int64, error) { + feeTx, ok := tx.(sdk.FeeTx) + if !ok { + return nil, 0, sdkErrors.Wrap(errorsTypes.ErrTxDecode, "Tx must be a FeeTx") + } + + feeCoins := feeTx.GetFee() + gas := feeTx.GetGas() + + validatorMinGasPrices := ctx.MinGasPrices() + + requiredFees := make(sdk.Coins, len(consensusMinGasPrices)) + + // Determine the required fees by multiplying each required minimum gas + // price by the gas limit, where fee = ceil(minGasPrice * gasLimit). + glDec := sdk.NewDec(int64(gas)) + for i, gp := range consensusMinGasPrices { + fee := gp.Amount.Mul(glDec) + requiredFees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) + } + + if ctx.IsCheckTx() { + validatorFees := make(sdk.Coins, len(validatorMinGasPrices)) + for i, gp := range validatorMinGasPrices { + fee := gp.Amount.Mul(glDec) + validatorFees[i] = sdk.NewCoin(gp.Denom, fee.Ceil().RoundInt()) + } + + requiredFees = requiredFees.Max(validatorFees) + } + + if !requiredFees.IsZero() && !feeCoins.IsAnyGTE(requiredFees) { + return nil, 0, sdkErrors.Wrapf(errorsTypes.ErrInsufficientFee, "insufficient fees; got: %s required: %s", feeCoins, requiredFees) + } + + priority := getTxPriority(feeCoins, int64(gas)) + return feeCoins, priority, nil + } +} + +// https://github.com/cosmos/cosmos-sdk/blob/release/v0.46.x/x/auth/ante/validator_tx_fee.go#L51 +// As the default DeductFeeDecorator is overwritten, this is the place to add a custom priority. +// Although this is calculated within "consensus-code" the priority itself gets only used +// for mem-pool ordering. 
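Editor's note (not part of the patch): the comment above and the getTxPriority helper that follows boil down to two small pieces of arithmetic. The required fee is the minimum gas price (the consensus minimum, or during CheckTx the larger of the consensus and validator minimums) multiplied by the gas limit and rounded up, and the mempool priority is simply the integer gas price the transaction actually pays. A minimal, self-contained Go sketch with made-up numbers illustrates both; the helper name ceilDiv and all values are hypothetical, not part of the module.

package main

import "fmt"

// ceilDiv returns ceil(a/b) using integer arithmetic only (a, b > 0).
func ceilDiv(a, b int64) int64 {
	return (a + b - 1) / b
}

func main() {
	// Hypothetical minimum gas price of 2/100 tkyve per gas unit and a 199_999 gas limit.
	num, den := int64(2), int64(100)
	gasLimit := int64(199_999)

	// requiredFee = ceil(minGasPrice * gasLimit), mirroring the per-denom loop in BuildTxFeeChecker.
	requiredFee := ceilDiv(num*gasLimit, den)
	fmt.Println("required fee:", requiredFee) // ceil(3999.98) = 4000

	// Mempool priority is the integer gas price actually paid (fee / gas), as in getTxPriority below.
	paidFee := int64(600_000)
	fmt.Println("priority:", paidFee/gasLimit) // 600000 / 199999 = 3
}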
+func getTxPriority(fee sdk.Coins, gas int64) int64 { + var priority int64 + for _, c := range fee { + p := int64(math.MaxInt64) + gasPrice := c.Amount.QuoRaw(gas) + if gasPrice.IsInt64() { + p = gasPrice.Int64() + } + if priority == 0 || p < priority { + priority = p + } + } + + return priority +} diff --git a/x/pool/client/cli/tx.go b/x/pool/client/cli/tx.go new file mode 100644 index 00000000..bc7c3b03 --- /dev/null +++ b/x/pool/client/cli/tx.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/spf13/cobra" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdFundPool()) + cmd.AddCommand(CmdDefundPool()) + + return cmd +} diff --git a/x/pool/client/cli/tx_defund_pool.go b/x/pool/client/cli/tx_defund_pool.go new file mode 100644 index 00000000..8bf60be8 --- /dev/null +++ b/x/pool/client/cli/tx_defund_pool.go @@ -0,0 +1,47 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdDefundPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "defund-pool [id] [amount]", + Short: "Broadcast message defund-pool", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + argAmount, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgDefundPool( + clientCtx.GetFromAddress().String(), + argId, + argAmount, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/pool/client/cli/tx_fund_pool.go b/x/pool/client/cli/tx_fund_pool.go new file mode 100644 index 00000000..aef09fbb --- /dev/null +++ b/x/pool/client/cli/tx_fund_pool.go @@ -0,0 +1,47 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdFundPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "fund-pool [id] [amount]", + Short: "Broadcast message fund-pool", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + argAmount, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.NewMsgFundPool( + clientCtx.GetFromAddress().String(), + argId, + argAmount, + ) + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/pool/genesis.go b/x/pool/genesis.go new file mode 100644 index 00000000..33d12006 
--- /dev/null +++ b/x/pool/genesis.go @@ -0,0 +1,26 @@ +package pool + +import ( + "github.com/KYVENetwork/chain/x/pool/keeper" + "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the pool module's state from a provided genesis state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + for _, elem := range genState.PoolList { + k.SetPool(ctx, elem) + } + + k.SetPoolCount(ctx, genState.PoolCount) +} + +// ExportGenesis returns the pool module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + + genesis.PoolList = k.GetAllPools(ctx) + genesis.PoolCount = k.GetPoolCount(ctx) + + return genesis +} diff --git a/x/pool/keeper/getters_pool.go b/x/pool/keeper/getters_pool.go new file mode 100644 index 00000000..1714f386 --- /dev/null +++ b/x/pool/keeper/getters_pool.go @@ -0,0 +1,137 @@ +package keeper + +import ( + "encoding/binary" + "strings" + + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetPoolCount get the total number of pools +func (k Keeper) GetPoolCount(ctx sdk.Context) uint64 { + bz := ctx.KVStore(k.storeKey).Get(types.PoolCountKey) + if bz == nil { + return 0 + } + return binary.BigEndian.Uint64(bz) +} + +// SetPoolCount sets the total number of pools +func (k Keeper) SetPoolCount(ctx sdk.Context, count uint64) { + bz := make([]byte, 8) + binary.BigEndian.PutUint64(bz, count) + ctx.KVStore(k.storeKey).Set(types.PoolCountKey, bz) +} + +// AppendPool appends a pool in the store with a new id and updates the count +func (k Keeper) AppendPool(ctx sdk.Context, pool types.Pool) uint64 { + count := k.GetPoolCount(ctx) + // Set the ID of the appended value + pool.Id = count + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + appendedValue := k.cdc.MustMarshal(&pool) + store.Set(types.PoolKeyPrefix(pool.Id), appendedValue) + + // Update pool count + k.SetPoolCount(ctx, count+1) + + return count +} + +// SetPool sets a specific pool in the store +func (k Keeper) SetPool(ctx sdk.Context, pool types.Pool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + b := k.cdc.MustMarshal(&pool) + store.Set(types.PoolKeyPrefix(pool.Id), b) +} + +// GetPool returns a pool from its ID +func (k Keeper) GetPool(ctx sdk.Context, id uint64) (val types.Pool, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + b := store.Get(types.PoolKeyPrefix(id)) + if b == nil { + return val, false + } + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// RemovePool removes a pool from the store +func (k Keeper) RemovePool(ctx sdk.Context, id uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + store.Delete(types.PoolKeyPrefix(id)) +} + +// GetAllPools returns all pools +func (k Keeper) GetAllPools(ctx sdk.Context) (list []types.Pool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Pool + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// GetPaginatedPoolsQuery performs a full search on all pools with the given parameters. 
+func (k Keeper) GetPaginatedPoolsQuery( + ctx sdk.Context, + pagination *query.PageRequest, + search string, + runtime string, + disabled bool, + storageProviderId uint32, +) ([]types.Pool, *query.PageResponse, error) { + var pools []types.Pool + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + + pageRes, err := query.FilteredPaginate(store, pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + var pool types.Pool + if err := k.cdc.Unmarshal(value, &pool); err != nil { + return false, err + } + + // filter search + if !strings.Contains(strings.ToLower(pool.Name), strings.ToLower(search)) { + return false, nil + } + + // filter runtime + if runtime != "" && runtime != pool.Runtime { + return false, nil + } + + // filter disabled + if disabled != pool.Disabled { + return false, nil + } + + // filter storage provider id + if storageProviderId != 0 && storageProviderId != pool.CurrentStorageProviderId { + return false, nil + } + + if accumulate { + pools = append(pools, pool) + } + + return true, nil + }) + if err != nil { + return nil, nil, status.Error(codes.Internal, err.Error()) + } + + return pools, pageRes, nil +} diff --git a/x/pool/keeper/keeper.go b/x/pool/keeper/keeper.go new file mode 100644 index 00000000..cd9a4dc3 --- /dev/null +++ b/x/pool/keeper/keeper.go @@ -0,0 +1,70 @@ +package keeper + +import ( + "fmt" + + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + + authority string + + stakersKeeper types.StakersKeeper + accountKeeper authkeeper.AccountKeeper + bankKeeper bankkeeper.Keeper + distrkeeper distrkeeper.Keeper + upgradeKeeper types.UpgradeKeeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storetypes.StoreKey, + memKey storetypes.StoreKey, + + authority string, + + accountKeeper authkeeper.AccountKeeper, + bankKeeper bankkeeper.Keeper, + distrKeeper distrkeeper.Keeper, + upgradeKeeper types.UpgradeKeeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + + authority: authority, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + distrkeeper: distrKeeper, + upgradeKeeper: upgradeKeeper, + } +} + +func SetStakersKeeper(k *Keeper, stakersKeeper types.StakersKeeper) { + k.stakersKeeper = stakersKeeper +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func (k Keeper) StoreKey() storetypes.StoreKey { + return k.storeKey +} diff --git a/x/pool/keeper/keeper_test.go b/x/pool/keeper/keeper_test.go new file mode 100644 index 00000000..b13c164d --- /dev/null +++ b/x/pool/keeper/keeper_test.go @@ -0,0 +1,16 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/pool/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestPoolKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} diff --git a/x/pool/keeper/keeper_utils_test.go b/x/pool/keeper/keeper_utils_test.go new file mode 100644 index 00000000..a053ca10 --- /dev/null +++ b/x/pool/keeper/keeper_utils_test.go @@ -0,0 +1,25 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" +) + +func BuildGovernanceTxs(s *i.KeeperTestSuite, msgs []sdk.Msg) (govV1Types.MsgSubmitProposal, govV1Types.MsgVote) { + minDeposit := s.App().GovKeeper.GetDepositParams(s.Ctx()).MinDeposit + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter := sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + + proposal, _ := govV1Types.NewMsgSubmitProposal( + msgs, minDeposit, i.DUMMY[0], "", + ) + + proposalId, _ := s.App().GovKeeper.GetProposalID(s.Ctx()) + + vote := govV1Types.NewMsgVote( + voter, proposalId, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + return *proposal, *vote +} diff --git a/x/pool/keeper/logic_end_block_handle_pool_upgrades.go b/x/pool/keeper/logic_end_block_handle_pool_upgrades.go new file mode 100644 index 00000000..f6f58ff8 --- /dev/null +++ b/x/pool/keeper/logic_end_block_handle_pool_upgrades.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// HandlePoolUpgrades handles to end-block logic for detecting and performing schedules pool-upgrades. +func (k Keeper) HandlePoolUpgrades(ctx sdk.Context) { + for _, pool := range k.GetAllPools(ctx) { + // PoolUpgrade is scheduled if `ScheduledAt` is not zero and smaller than the current block-time + if pool.UpgradePlan.ScheduledAt > 0 && uint64(ctx.BlockTime().Unix()) >= pool.UpgradePlan.ScheduledAt { + + // Check if pool upgrade already has been applied + if pool.Protocol.Version != pool.UpgradePlan.Version || pool.Protocol.Binaries != pool.UpgradePlan.Binaries { + // perform pool upgrade + pool.Protocol.Version = pool.UpgradePlan.Version + pool.Protocol.Binaries = pool.UpgradePlan.Binaries + pool.Protocol.LastUpgrade = pool.UpgradePlan.ScheduledAt + } + + // Check if upgrade duration was reached + if uint64(ctx.BlockTime().Unix()) >= (pool.UpgradePlan.ScheduledAt + pool.UpgradePlan.Duration) { + // reset upgrade plan to default values + pool.UpgradePlan = &types.UpgradePlan{} + } + + k.SetPool(ctx, pool) + } + } +} diff --git a/x/pool/keeper/logic_end_block_handle_pool_upgrades_test.go b/x/pool/keeper/logic_end_block_handle_pool_upgrades_test.go new file mode 100644 index 00000000..e0e0d633 --- /dev/null +++ b/x/pool/keeper/logic_end_block_handle_pool_upgrades_test.go @@ -0,0 +1,245 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - logic_end_block_handle_pool_upgrades.go + +* Schedule a pool upgrade in the past +* Schedule a pool upgrade in the future +* Schedule pool upgrade now and with no upgrade duration +* Try to schedule pool upgrade with same version but different binaries +* Try to schedule pool upgrade with same binaries but different version +* Try to schedule pool upgrade with same version and same binaries + +*/ + +var _ = Describe("logic_end_block_handle_pool_upgrades.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + MaxBundleSize: 100, + StartKey: "0", + MinDelegation: 100 * i.KYVE, + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Schedule a pool upgrade in the past", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{\"linux\":\"test\"}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()) - 120, + Duration: 3600, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if pool is currently upgrading + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&upgradePlan)) + + // check if upgrade is done after upgrade duration + s.CommitAfterSeconds(3600) + s.CommitAfterSeconds(1) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) + + It("Schedule a pool upgrade in the future", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{\"linux\":\"test\"}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()) + 120, + Duration: 3600, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if pool upgrade is still only scheduled + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal("0.0.0")) + Expect(pool.Protocol.Binaries).To(Equal("{}")) + + Expect(pool.UpgradePlan).To(Equal(&upgradePlan)) + + s.CommitAfterSeconds(120) + s.CommitAfterSeconds(1) + + // check if pool is currently upgrading + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&upgradePlan)) + + s.CommitAfterSeconds(3600) + s.CommitAfterSeconds(1) + + // check if upgrade is done after upgrade duration + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) 
+ + It("Schedule pool upgrade now and with no upgrade duration", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{\"linux\":\"test\"}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 0, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if upgrade is done after upgrade duration + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) + + It("Try to schedule pool upgrade with same version but different binaries", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "0.0.0", + Binaries: "{\"linux\":\"test\"}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 0, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if upgrade is done after upgrade duration + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) + + It("Try to schedule pool upgrade with same binaries but different version", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 0, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if upgrade is done after upgrade duration + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) + + It("Try to schedule pool upgrade with same binaries but different version", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + upgradePlan := pooltypes.UpgradePlan{ + Version: "0.0.0", + Binaries: "{}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 0, + } + + pool.UpgradePlan = &upgradePlan + + // ACT + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + s.CommitAfterSeconds(1) + + // ASSERT + // check if upgrade is done after upgrade duration + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Protocol.Version).To(Equal(upgradePlan.Version)) + Expect(pool.Protocol.Binaries).To(Equal(upgradePlan.Binaries)) + + Expect(pool.UpgradePlan).To(Equal(&pooltypes.UpgradePlan{})) + }) +}) diff --git a/x/pool/keeper/logic_funders.go b/x/pool/keeper/logic_funders.go new file mode 100644 index 00000000..f72c2972 --- /dev/null +++ b/x/pool/keeper/logic_funders.go @@ -0,0 +1,78 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ChargeFundersOfPool equally splits the amount between all funders and removes +// the appropriate amount from each funder. +// All funders who can't afford the amount, are kicked out. 
+// Their remaining amount is transferred to the Treasury. +// The function returns an error if the pool runs out of funds. +// This method does not transfer any funds. The bundles-module +// is responsible for transferring the rewards out of the module. +func (k Keeper) ChargeFundersOfPool(ctx sdk.Context, poolId uint64, amount uint64) error { + pool, poolErr := k.GetPoolWithError(ctx, poolId) + if poolErr != nil { + return poolErr + } + + // This is the amount every funder will be charged + var amountPerFunder uint64 + + // Due to integer division there will be a remainder which cannot be split + // equally among all funders. This amount is charged to the lowest funder + var amountRemainder uint64 + + // When a funder is not able to pay, all the remaining funds will be moved + // to the treasury. + var slashedFunds uint64 + + // Remove all funders who cannot afford amountPerFunder + for len(pool.Funders) > 0 { + amountPerFunder = amount / uint64(len(pool.Funders)) + amountRemainder = amount - amountPerFunder*uint64(len(pool.Funders)) + + lowestFunder := pool.GetLowestFunder() + + if amountRemainder+amountPerFunder > lowestFunder.Amount { + pool.RemoveFunder(lowestFunder.Address) + + _ = ctx.EventManager().EmitTypedEvent(&pooltypes.EventPoolFundsSlashed{ + PoolId: poolId, + Address: lowestFunder.Address, + Amount: lowestFunder.Amount, + }) + + slashedFunds += lowestFunder.Amount + } else { + break + } + } + + if slashedFunds > 0 { + // send slash to treasury + if err := util.TransferFromModuleToTreasury(k.accountKeeper, k.distrkeeper, ctx, pooltypes.ModuleName, slashedFunds); err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "pool module out of funds") + } + } + + if len(pool.Funders) == 0 { + k.SetPool(ctx, pool) + return pooltypes.ErrFundsTooLow + } + + // Remove amount from funders + for _, funder := range pool.Funders { + pool.SubtractAmountFromFunder(funder.Address, amountPerFunder) + } + + lowestFunder := pool.GetLowestFunder() + pool.SubtractAmountFromFunder(lowestFunder.Address, amountRemainder) + + k.SetPool(ctx, pool) + + return nil +} diff --git a/x/pool/keeper/logic_funders_test.go b/x/pool/keeper/logic_funders_test.go new file mode 100644 index 00000000..ecff4204 --- /dev/null +++ b/x/pool/keeper/logic_funders_test.go @@ -0,0 +1,285 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/util" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + . "github.com/onsi/ginkgo/v2" + .
"github.com/onsi/gomega" +) + +/* + +TEST CASES - logic_funders_test.go + +* Add funders; check total sum +* Add multiple funders; check total sum +* Remove funder +* Remove funder by defunding everything +* Charge Funders with equal amounts +* Charge Funders test remainder +* Charge exactly the lowest funder amount +* Kick out multiple lowest funders +* Kick out all funders + +*/ + +func chargeFunders(s *i.KeeperTestSuite, amount uint64) error { + err := s.App().PoolKeeper.ChargeFundersOfPool(s.Ctx(), 0, amount) + if err == nil { + return util.TransferFromModuleToAddress(s.App().BankKeeper, s.Ctx(), pooltypes.ModuleName, i.BURNER, amount) + } + return err +} + +func fundersCheck(pool *pooltypes.Pool) { + poolFunds := uint64(0) + funders := make(map[string]bool) + for _, funder := range pool.Funders { + Expect(funders[funder.Address]).To(BeFalse()) + funders[funder.Address] = true + poolFunds += funder.Amount + } + Expect(pool.TotalFunds).To(Equal(poolFunds)) +} + +var _ = Describe("logic_funders_test.go", Ordered, func() { + s := i.NewCleanChain() + var pool *pooltypes.Pool + + BeforeEach(func() { + s = i.NewCleanChain() + pool = &pooltypes.Pool{ + Name: "Moontest", + MaxBundleSize: 100, + StartKey: "0", + MinDelegation: 100 * i.KYVE, + UploadInterval: 60, + OperatingCost: 10_000, + UpgradePlan: &pooltypes.UpgradePlan{}, + } + + s.App().PoolKeeper.AppendPool(s.Ctx(), *pool) + }) + + AfterEach(func() { + fundersCheck(pool) + s.PerformValidityChecks() + }) + + It("Add funders; check total sum", func() { + // ACT + pool.AddAmountToFunder(i.ALICE, 1000) + pool.AddAmountToFunder(i.ALICE, 2000) + pool.AddAmountToFunder(i.ALICE, 0) + pool.AddAmountToFunder(i.BOB, 0) + pool.AddAmountToFunder(i.ALICE, 10) + + // ASSERT + Expect(pool.TotalFunds).To(Equal(uint64(3010))) + Expect(pool.Funders).To(HaveLen(1)) + }) + + It("Add multiple funders; check total sum", func() { + // ACT + pool.AddAmountToFunder(i.ALICE, 1000) + pool.AddAmountToFunder(i.ALICE, 2000) + pool.AddAmountToFunder(i.ALICE, 0) + pool.AddAmountToFunder(i.BOB, 1000) + pool.AddAmountToFunder(i.ALICE, 10) + + // ASSERT + Expect(pool.TotalFunds).To(Equal(uint64(4010))) + Expect(pool.Funders).To(HaveLen(2)) + }) + + It("Remove funder", func() { + // ARRANGE + pool.AddAmountToFunder(i.ALICE, 1000) + pool.AddAmountToFunder(i.ALICE, 2000) + pool.AddAmountToFunder(i.ALICE, 0) + pool.AddAmountToFunder(i.BOB, 0) + pool.AddAmountToFunder(i.ALICE, 10) + pool.AddAmountToFunder(i.CHARLIE, 500) + + Expect(pool.TotalFunds).To(Equal(uint64(3510))) + + // ACT + // Alice: 3010 + // Charlie: 500 + pool.RemoveFunder(i.CHARLIE) + + // ASSERT + Expect(pool.TotalFunds).To(Equal(uint64(3010))) + Expect(pool.Funders).To(HaveLen(1)) + }) + + It("Remove funder by defunding everything", func() { + // ARRANGE + pool.AddAmountToFunder(i.ALICE, 1000) + pool.AddAmountToFunder(i.ALICE, 2000) + pool.AddAmountToFunder(i.ALICE, 0) + pool.AddAmountToFunder(i.BOB, 0) + pool.AddAmountToFunder(i.ALICE, 10) + pool.AddAmountToFunder(i.CHARLIE, 500) + + // ACT + // Alice: 3010 + // Charlie: 500 + pool.SubtractAmountFromFunder(i.ALICE, 3010) + + // ASSERT + Expect(pool.TotalFunds).To(Equal(uint64(500))) + Expect(pool.Funders).To(HaveLen(1)) + }) + + It("Charge Funders with equal amounts", func() { + // ARRANGE + for k := 0; k < 50; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[k], + Id: 0, + Amount: 100 * i.KYVE, + }) + } + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal(50 * 100 
* i.KYVE)) + + // ACT + err := chargeFunders(s, 50*10*i.KYVE) + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.TotalFunds).To(Equal(50 * 90 * i.KYVE)) + + for _, funder := range pool.Funders { + Expect(funder.Amount).To(Equal(90 * i.KYVE)) + } + }) + + It("Charge Funders test remainder", func() { + // ARRANGE + for k := 0; k < 50; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[k], + Id: 0, + Amount: 100 * i.KYVE, + }) + } + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal(50 * 100 * i.KYVE)) + + // ACT + // Charge 10 $KYVE + 49tkyve + // the 49 tkyve will be charged to the lowest funder + err := chargeFunders(s, 50*10*i.KYVE+49) + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + for _, funder := range pool.Funders { + if pool.GetLowestFunder().Address == funder.Address { + Expect(funder.Amount).To(Equal(90*i.KYVE - 49)) + } else { + Expect(funder.Amount).To(Equal(90 * i.KYVE)) + } + } + }) + + It("Charge exactly lowest funder amount", func() { + // ARRANGE + for k := 0; k < 40; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[k], + Id: 0, + Amount: 100 * i.KYVE, + }) + } + for k := 0; k < 10; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[40+k], + Id: 0, + Amount: 200 * i.KYVE, + }) + } + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal((100*40 + 200*10) * i.KYVE)) + + // ACT + err := chargeFunders(s, 50*100*i.KYVE) + + // ASSERT + Expect(err).NotTo(HaveOccurred()) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.Funders).To(HaveLen(10)) + }) + + It("Kick out multiple lowest funders", func() { + // Arrange + for k := 0; k < 40; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[k], + Id: 0, + Amount: 50 * i.KYVE, + }) + } + for k := 0; k < 10; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[40+k], + Id: 0, + Amount: 1000 * i.KYVE, + }) + } + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal((50*40 + 1000*10) * i.KYVE)) + + // 40 * 50 = 2000 + // 10 * 1000 = 10000 + // Charge 5000 + + // Act + err := chargeFunders(s, 5000*i.KYVE) + + // Assert + Expect(err).NotTo(HaveOccurred()) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.Funders).To(HaveLen(10)) + + for _, funder := range pool.Funders { + Expect(funder.Amount).To(Equal(500 * i.KYVE)) + } + }) + + It("Charge more than pool has funds", func() { + // ARRANGE + for k := 0; k < 50; k++ { + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[k], + Id: 0, + Amount: 50 * i.KYVE, + }) + } + pool, poolFound := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(poolFound).To(BeTrue()) + Expect(pool.TotalFunds).To(Equal((50 * 50) * i.KYVE)) + + // ACT + err := chargeFunders(s, 5000*i.KYVE) + + // ASSERT + Expect(err).To(HaveOccurred()) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.Funders).To(HaveLen(0)) + }) +}) diff --git a/x/pool/keeper/logic_pool.go b/x/pool/keeper/logic_pool.go new file mode 100644 index 00000000..42c94b44 --- /dev/null +++ b/x/pool/keeper/logic_pool.go @@ -0,0 +1,45 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk 
"github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// GetPoolWithError returns a pool by its poolId, if the pool does not exist, +// a types.ErrPoolNotFound error is returned +func (k Keeper) GetPoolWithError(ctx sdk.Context, poolId uint64) (types.Pool, error) { + pool, found := k.GetPool(ctx, poolId) + if !found { + return types.Pool{}, sdkErrors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), poolId) + } + return pool, nil +} + +// AssertPoolExists returns nil if the pool exists and types.ErrPoolNotFound if it does not. +func (k Keeper) AssertPoolExists(ctx sdk.Context, poolId uint64) error { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PoolKey) + if store.Has(types.PoolKeyPrefix(poolId)) { + return nil + } + return sdkErrors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), poolId) +} + +// IncrementBundleInformation updates the latest finalized bundle of a pool +func (k Keeper) IncrementBundleInformation( + ctx sdk.Context, + poolId uint64, + currentIndex uint64, + currentKey string, + currentSummary string, +) { + pool, found := k.GetPool(ctx, poolId) + if found { + pool.CurrentIndex = currentIndex + pool.TotalBundles = pool.TotalBundles + 1 + pool.CurrentKey = currentKey + pool.CurrentSummary = currentSummary + k.SetPool(ctx, pool) + } +} diff --git a/x/pool/keeper/msg_server.go b/x/pool/keeper/msg_server.go new file mode 100644 index 00000000..f2eb405a --- /dev/null +++ b/x/pool/keeper/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/pool/types" +) + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/pool/keeper/msg_server_cancel_runtime_upgrade.go b/x/pool/keeper/msg_server_cancel_runtime_upgrade.go new file mode 100644 index 00000000..f51215a6 --- /dev/null +++ b/x/pool/keeper/msg_server_cancel_runtime_upgrade.go @@ -0,0 +1,35 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +func (k msgServer) CancelRuntimeUpgrade(goCtx context.Context, req *types.MsgCancelRuntimeUpgrade) (*types.MsgCancelRuntimeUpgradeResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + for _, pool := range k.GetAllPools(ctx) { + if pool.Runtime != req.Runtime { + continue + } + if pool.UpgradePlan.ScheduledAt == 0 { + continue + } + + pool.UpgradePlan = &types.UpgradePlan{} + k.SetPool(ctx, pool) + } + + return &types.MsgCancelRuntimeUpgradeResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_cancel_runtime_upgrade_test.go b/x/pool/keeper/msg_server_cancel_runtime_upgrade_test.go new file mode 100644 index 00000000..71d566fa --- /dev/null +++ b/x/pool/keeper/msg_server_cancel_runtime_upgrade_test.go @@ -0,0 +1,184 @@ +package keeper_test + +import ( + "time" + + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_cancel_runtime_upgrade.go + +* Invalid authority (transaction). +* Invalid authority (proposal). +* Cancel scheduled runtime upgrade +* Try to cancel upgrade which is already upgrading + +*/ + +var _ = Describe("msg_server_cancel_runtime_upgrade.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Runtime: "@kyve/test", + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{\"linux\":\"test\"}", + LastUpgrade: 0, + }, + UpgradePlan: &types.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction).", func() { + // ARRANGE + msg := &types.MsgCancelRuntimeUpgrade{ + Authority: i.DUMMY[0], + Runtime: "@kyve/test", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal).", func() { + // ARRANGE + msg := &types.MsgCancelRuntimeUpgrade{ + Authority: i.DUMMY[0], + Runtime: "@kyve/test", + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Cancel scheduled runtime upgrade", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + // ACT + cancel := &types.MsgCancelRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + } + + p, v = BuildGovernanceTxs(s, []sdk.Msg{cancel}) + + _, submitErr = s.RunTx(&p) + _, voteErr = s.RunTx(&v) + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + })) + }) + + It("Try to cancel upgrade which is already upgrading", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()), + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + // ACT + cancel := &types.MsgCancelRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + } + + p, v = BuildGovernanceTxs(s, []sdk.Msg{cancel}) + + _, submitErr = s.RunTx(&p) + _, voteErr = s.RunTx(&v) + Expect(submitErr).To(Not(HaveOccurred())) + 
Expect(voteErr).To(Not(HaveOccurred())) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + })) + }) +}) diff --git a/x/pool/keeper/msg_server_create_pool.go b/x/pool/keeper/msg_server_create_pool.go new file mode 100644 index 00000000..1b19972d --- /dev/null +++ b/x/pool/keeper/msg_server_create_pool.go @@ -0,0 +1,67 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +func (k msgServer) CreatePool(goCtx context.Context, req *types.MsgCreatePool) (*types.MsgCreatePoolResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + if !json.Valid([]byte(req.Binaries)) { + return nil, errors.Wrapf(errorsTypes.ErrLogic, types.ErrInvalidJson.Error(), req.Binaries) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + k.AppendPool(ctx, types.Pool{ + Name: req.Name, + Runtime: req.Runtime, + Logo: req.Logo, + Config: req.Config, + StartKey: req.StartKey, + UploadInterval: req.UploadInterval, + OperatingCost: req.OperatingCost, + MinDelegation: req.MinDelegation, + MaxBundleSize: req.MaxBundleSize, + Protocol: &types.Protocol{ + Version: req.Version, + Binaries: req.Binaries, + LastUpgrade: uint64(ctx.BlockTime().Unix()), + }, + UpgradePlan: &types.UpgradePlan{}, + CurrentStorageProviderId: req.StorageProviderId, + CurrentCompressionId: req.CompressionId, + }) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventCreatePool{ + Id: k.GetPoolCount(ctx) - 1, + Name: req.Name, + Runtime: req.Runtime, + Logo: req.Logo, + Config: req.Config, + StartKey: req.StartKey, + UploadInterval: req.UploadInterval, + OperatingCost: req.OperatingCost, + MinDelegation: req.MinDelegation, + MaxBundleSize: req.MaxBundleSize, + Version: req.Version, + Binaries: req.Binaries, + StorageProviderId: req.StorageProviderId, + CompressionId: req.CompressionId, + }) + + return &types.MsgCreatePoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_create_pool_test.go b/x/pool/keeper/msg_server_create_pool_test.go new file mode 100644 index 00000000..48ca0692 --- /dev/null +++ b/x/pool/keeper/msg_server_create_pool_test.go @@ -0,0 +1,307 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_create_pool.go + +* Invalid authority (transaction) +* Invalid authority (proposal) +* Create first pool +* Create another pool +* Create pool with invalid binaries + +*/ + +var _ = Describe("msg_server_create_pool.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgCreatePool{ + Authority: i.DUMMY[0], + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{}", + StorageProviderId: 2, + CompressionId: 1, + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgCreatePool{ + Authority: i.DUMMY[0], + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{}", + StorageProviderId: 2, + CompressionId: 1, + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Create first pool", func() { + // ARRANGE + msg := &types.MsgCreatePool{ + Authority: gov, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{}", + StorageProviderId: 2, + CompressionId: 1, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool).To(Equal(types.Pool{ + Id: 0, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + CurrentKey: "", + CurrentSummary: "", + CurrentIndex: 0, + TotalBundles: 0, + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Disabled: false, + Funders: nil, + TotalFunds: 0, + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + }, + CurrentStorageProviderId: 2, + CurrentCompressionId: 1, + })) + }) + + It("Create another pool", 
func() { + // ARRANGE + msg := &types.MsgCreatePool{ + Authority: gov, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{}", + StorageProviderId: 2, + CompressionId: 1, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + // ACT + msg = &types.MsgCreatePool{ + Authority: gov, + Name: "TestPool2", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{}", + StorageProviderId: 2, + CompressionId: 1, + } + + p, v = BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr = s.RunTx(&p) + _, voteErr = s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ARRANGE + proposal, _ = s.App().GovKeeper.GetProposal(s.Ctx(), 2) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 1) + Expect(pool).To(Equal(types.Pool{ + Id: 1, + Name: "TestPool2", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + CurrentKey: "", + CurrentSummary: "", + CurrentIndex: 0, + TotalBundles: 0, + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Disabled: false, + Funders: nil, + TotalFunds: 0, + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + }, + CurrentStorageProviderId: 2, + CurrentCompressionId: 1, + })) + }) + + It("Create pool with invalid binaries", func() { + // ARRANGE + msg := &types.MsgCreatePool{ + Authority: gov, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Version: "0.0.0", + Binaries: "{", + StorageProviderId: 2, + CompressionId: 1, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + + _, found := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(found).To(BeFalse()) + }) +}) diff --git a/x/pool/keeper/msg_server_defund_pool.go b/x/pool/keeper/msg_server_defund_pool.go new file mode 100644 index 00000000..674cc3a7 --- /dev/null +++ 
b/x/pool/keeper/msg_server_defund_pool.go @@ -0,0 +1,54 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// DefundPool handles the logic to defund a pool. +// If the user is a funder, it will subtract the provided amount +// and send the tokens back. If the amount equals the current funding amount +// the funder is removed completely. +func (k msgServer) DefundPool(goCtx context.Context, msg *types.MsgDefundPool) (*types.MsgDefundPoolResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + pool, found := k.GetPool(ctx, msg.Id) + + // Pool has to exist + if !found { + return nil, sdkErrors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), msg.Id) + } + + // Sender needs to be a funder in the pool + funderAmount := pool.GetFunderAmount(msg.Creator) + if funderAmount == 0 { + return nil, sdkErrors.ErrNotFound + } + + // Check if the sender is trying to defund more than they have funded. + if msg.Amount > funderAmount { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrDefundTooHigh.Error(), msg.Creator) + } + + // Update state variables (or completely remove if fully defunding). + pool.SubtractAmountFromFunder(msg.Creator, msg.Amount) + + // Transfer tokens from this module to sender. + if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, msg.Creator, msg.Amount); err != nil { + return nil, err + } + + // Emit a defund event. + _ = ctx.EventManager().EmitTypedEvent(&types.EventDefundPool{ + PoolId: msg.Id, + Address: msg.Creator, + Amount: msg.Amount, + }) + + k.SetPool(ctx, pool) + + return &types.MsgDefundPoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_defund_pool_test.go b/x/pool/keeper/msg_server_defund_pool_test.go new file mode 100644 index 00000000..324e49f9 --- /dev/null +++ b/x/pool/keeper/msg_server_defund_pool_test.go @@ -0,0 +1,149 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_defund_pool.go + +* Defund 50 KYVE from a funder who has previously funded 100 KYVE +* Try to defund more than actually funded +* Defund full funding amount from a funder who has previously funded 100 KYVE +* Defund as highest funder 75 KYVE in order to be the lowest funder afterwards + +*/ + +var _ = Describe("msg_server_defund_pool.go", Ordered, func() { + s := i.NewCleanChain() + + initialBalance := s.GetBalanceFromAddress(i.ALICE) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + // fund pool + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Defund 50 KYVE from a funder who has previously funded 100 KYVE", func() { + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(50 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.TotalFunds).To(Equal(50 * i.KYVE)) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(50 * i.KYVE)) + + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(50 * i.KYVE)) + }) + + It("Try to defund more than actually funded", func() { + // ACT + s.RunTxPoolError(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 101 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(100 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.TotalFunds).To(Equal(100 * i.KYVE)) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(100 * i.KYVE)) + + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(100 * i.KYVE)) + }) + + It("Defund full funding amount from a funder who has previously funded 100 KYVE", func() { + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(BeZero()) + + Expect(pool.Funders).To(BeEmpty()) + Expect(pool.TotalFunds).To(BeZero()) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(uint64(0))) + + Expect(pool.GetLowestFunder()).To(Equal(pooltypes.Funder{})) + }) + + It("Defund as highest funder 75 KYVE in order to be the lowest funder afterwards", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 50 * i.KYVE, + }) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.GetLowestFunder().Address).To(Equal(i.BOB)) + Expect(pool.GetLowestFunder().Amount).To(Equal(50 * i.KYVE)) + + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 75 * 
i.KYVE, + }) + + // ASSERT + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(25 * i.KYVE)) + }) +}) diff --git a/x/pool/keeper/msg_server_disable_pool.go b/x/pool/keeper/msg_server_disable_pool.go new file mode 100644 index 00000000..b0fd8f2a --- /dev/null +++ b/x/pool/keeper/msg_server_disable_pool.go @@ -0,0 +1,42 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +func (k msgServer) DisablePool(goCtx context.Context, req *types.MsgDisablePool) (*types.MsgDisablePoolResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + pool, found := k.GetPool(ctx, req.Id) + + if !found { + return nil, errors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), req.Id) + } + + if pool.Disabled { + return nil, errors.Wrapf(sdkErrors.ErrLogic, "Pool is already disabled.") + } + + pool.Disabled = true + k.SetPool(ctx, pool) + + // remove all stakers from pool in order to "reset" it + poolMembers := k.stakersKeeper.GetAllStakerAddressesOfPool(ctx, pool.Id) + for _, staker := range poolMembers { + k.stakersKeeper.LeavePool(ctx, staker, pool.Id) + } + + return &types.MsgDisablePoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_disable_pool_test.go b/x/pool/keeper/msg_server_disable_pool_test.go new file mode 100644 index 00000000..13fdde32 --- /dev/null +++ b/x/pool/keeper/msg_server_disable_pool_test.go @@ -0,0 +1,492 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_disabled_pool.go + +* Invalid authority (transaction) +* Invalid authority (proposal) +* Disable a non-existing pool +* Disable pool which is active +* Disable pool which is already disabled +* Disable multiple pools +* Kick out all stakers from pool +* Kick out all stakers from pool which are still members of another pool +* Drop current bundle proposal when pool gets disabled + +*/ + +var _ = Describe("msg_server_disable_pool.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Name: "PoolTest", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &types.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&types.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Name: "PoolTest2", + MaxBundleSize: 100, + StartKey: "0", + UploadInterval: 60, + OperatingCost: 10_000, + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &types.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&types.MsgFundPool{ + Creator: i.ALICE, + Id: 1, + Amount: 100 * i.KYVE, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction).", func() { + // ARRANGE + msg := &types.MsgDisablePool{ + Authority: i.DUMMY[0], + Id: 0, + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.Disabled).To(BeFalse()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Invalid authority (proposal).", func() { + // ARRANGE + msg := &types.MsgDisablePool{ + Authority: i.DUMMY[0], + Id: 0, + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.Disabled).To(BeFalse()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Disable a non-existing pool", func() { + // ARRANGE + msg := &types.MsgDisablePool{ + Authority: gov, + Id: 42, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + }) + + It("Disable pool which is active", func() { + // ARRANGE + msg := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + 
s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(pool.Disabled).To(BeTrue()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Disable pool which is already disabled", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.Disabled = true + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + msg := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + Expect(pool.Disabled).To(BeTrue()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Disable multiple pools", func() { + // ARRANGE + msgFirstPool := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + msgSecondPool := &types.MsgDisablePool{ + Authority: gov, + Id: 1, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msgFirstPool, msgSecondPool}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + firstPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + secondPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(firstPool.Disabled).To(BeTrue()) + Expect(secondPool.Disabled).To(BeTrue()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + + bundleProposal, _ = s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 1) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Kick out all stakers from pool", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + msgFirstPool := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + + Expect(s.App().StakersKeeper.GetAllValaccounts(s.Ctx())).To(HaveLen(2)) + Expect(s.App().StakersKeeper.GetActiveStakers(s.Ctx())).To(HaveLen(2)) + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msgFirstPool}) + + msgVoteStaker0 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + msgVoteStaker1 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + + // ACT + _, submitErr := s.RunTx(&p) + 
_, voteErr := s.RunTx(&v) + _, voteErr0 := s.RunTx(msgVoteStaker0) + _, voteErr1 := s.RunTx(msgVoteStaker1) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(s.App().StakersKeeper.GetAllValaccounts(s.Ctx())).To(HaveLen(0)) + Expect(s.App().StakersKeeper.GetActiveStakers(s.Ctx())).To(HaveLen(0)) + + firstPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + Expect(voteErr0).To(Not(HaveOccurred())) + Expect(voteErr1).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(firstPool.Disabled).To(BeTrue()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Kick out all stakers from pool which are still members of another pool", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_2, + Amount: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + msgFirstPool := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + + Expect(s.App().StakersKeeper.GetAllValaccounts(s.Ctx())).To(HaveLen(3)) + Expect(s.App().StakersKeeper.GetActiveStakers(s.Ctx())).To(HaveLen(2)) + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msgFirstPool}) + + msgVoteStaker0 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + msgVoteStaker1 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + _, voteErr0 := s.RunTx(msgVoteStaker0) + _, voteErr1 := s.RunTx(msgVoteStaker1) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(s.App().StakersKeeper.GetAllValaccounts(s.Ctx())).To(HaveLen(1)) + Expect(s.App().StakersKeeper.GetActiveStakers(s.Ctx())).To(HaveLen(1)) + + firstPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + Expect(voteErr0).To(Not(HaveOccurred())) + Expect(voteErr1).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(firstPool.Disabled).To(BeTrue()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(BeEmpty()) + }) + + It("Drop current bundle proposal when pool gets disabled", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + 
Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposal.StorageId).To(Equal("y62A3tfbSNcNYDGoL-eXwzyV-Zc9Q0OVtDvR1biJmNI")) + + msgFirstPool := &types.MsgDisablePool{ + Authority: gov, + Id: 0, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msgFirstPool}) + + msgVoteStaker0 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + msgVoteStaker1 := govV1Types.NewMsgVote(sdk.MustAccAddressFromBech32(i.STAKER_0), 1, govV1Types.VoteOption_VOTE_OPTION_YES, "") + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + _, voteErr0 := s.RunTx(msgVoteStaker0) + _, voteErr1 := s.RunTx(msgVoteStaker1) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + Expect(voteErr0).To(Not(HaveOccurred())) + Expect(voteErr1).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(pool.Disabled).To(BeTrue()) + + // check if bundle proposal got dropped + bundleProposal, bundleProposalFound := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + Expect(bundleProposalFound).To(BeTrue()) + + Expect(bundleProposal.PoolId).To(Equal(uint64(0))) + Expect(bundleProposal.StorageId).To(BeEmpty()) + Expect(bundleProposal.Uploader).To(BeEmpty()) + Expect(bundleProposal.NextUploader).To(BeEmpty()) + Expect(bundleProposal.DataSize).To(BeZero()) + Expect(bundleProposal.DataHash).To(BeEmpty()) + Expect(bundleProposal.BundleSize).To(BeZero()) + Expect(bundleProposal.FromKey).To(BeEmpty()) + Expect(bundleProposal.ToKey).To(BeEmpty()) + Expect(bundleProposal.BundleSummary).To(BeEmpty()) + Expect(bundleProposal.UpdatedAt).NotTo(BeZero()) + Expect(bundleProposal.VotersValid).To(BeEmpty()) + Expect(bundleProposal.VotersInvalid).To(BeEmpty()) + Expect(bundleProposal.VotersAbstain).To(BeEmpty()) + }) +}) diff --git a/x/pool/keeper/msg_server_enable_pool.go b/x/pool/keeper/msg_server_enable_pool.go new file mode 100644 index 00000000..6c727e81 --- /dev/null +++ b/x/pool/keeper/msg_server_enable_pool.go @@ -0,0 +1,35 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +func (k msgServer) EnablePool(goCtx context.Context, req *types.MsgEnablePool) (*types.MsgEnablePoolResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + pool, found := k.GetPool(ctx, req.Id) + + if !found { + return nil, errors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), req.Id) + } + if !pool.Disabled { + return nil, errors.Wrapf(sdkErrors.ErrLogic, "Pool is already enabled.") + } + + pool.Disabled = false + k.SetPool(ctx, pool) + + return &types.MsgEnablePoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_enable_pool_test.go 
b/x/pool/keeper/msg_server_enable_pool_test.go new file mode 100644 index 00000000..ab0fbc73 --- /dev/null +++ b/x/pool/keeper/msg_server_enable_pool_test.go @@ -0,0 +1,203 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_enable_pool.go + +* Invalid authority (transaction) +* Invalid authority (proposal) +* Enable a non-existing pool +* Enable pool which is active +* Enable pool which is disabled +* Enable multiple pools + +*/ + +var _ = Describe("msg_server_enable_pool.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + UpgradePlan: &types.UpgradePlan{}, + }) + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + UpgradePlan: &types.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgEnablePool{ + Authority: i.DUMMY[0], + Id: 0, + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgEnablePool{ + Authority: i.DUMMY[0], + Id: 0, + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Enable a non-existing pool", func() { + // ARRANGE + msg := &types.MsgEnablePool{ + Authority: gov, + Id: 42, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + }) + + It("Enable pool which is active.", func() { + // ARRANGE + msg := &types.MsgEnablePool{ + Authority: gov, + Id: 0, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + Expect(pool.Disabled).To(BeFalse()) + }) + + It("Enable pool which is disabled", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.Disabled = true + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + msg := &types.MsgEnablePool{ + Authority: gov, + Id: 0, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + 
Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(pool.Disabled).To(BeFalse()) + }) + + It("Enable multiple pools", func() { + // ARRANGE + firstPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + firstPool.Disabled = true + s.App().PoolKeeper.SetPool(s.Ctx(), firstPool) + + secondPool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 1) + secondPool.Disabled = true + s.App().PoolKeeper.SetPool(s.Ctx(), secondPool) + + msgFirstPool := &types.MsgEnablePool{ + Authority: gov, + Id: 0, + } + msgSecondPool := &types.MsgEnablePool{ + Authority: gov, + Id: 1, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msgFirstPool, msgSecondPool}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + firstPool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + secondPool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + Expect(firstPool.Disabled).To(BeFalse()) + Expect(secondPool.Disabled).To(BeFalse()) + }) +}) diff --git a/x/pool/keeper/msg_server_fund_pool.go b/x/pool/keeper/msg_server_fund_pool.go new file mode 100644 index 00000000..676fda51 --- /dev/null +++ b/x/pool/keeper/msg_server_fund_pool.go @@ -0,0 +1,70 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// FundPool handles the logic to fund a pool. +// A funder is added to the funders list with the specified amount +// If the funders list is full, it checks if the funder wants to fund +// more than the current lowest funder. If so, the current lowest funder +// will get their tokens back and removed form the funders list. +func (k msgServer) FundPool(goCtx context.Context, msg *types.MsgFundPool) (*types.MsgFundPoolResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + pool, poolFound := k.GetPool(ctx, msg.Id) + + if !poolFound { + return nil, sdkErrors.Wrapf(sdkErrors.ErrNotFound, types.ErrPoolNotFound.Error(), msg.Id) + } + + // Check if funder already exists + // If sender is not a funder, check if a free funding slot is still available + if pool.GetFunderAmount(msg.Creator) == 0 { + // If funder does not exist, check if limit is already exceeded. + if len(pool.Funders) >= types.MaxFunders { + // If so, check if funder wants to fund more than current lowest funder. + lowestFunder := pool.GetLowestFunder() + if msg.Amount > lowestFunder.Amount { + // Unstake lowest Funder + err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, lowestFunder.Address, lowestFunder.Amount) + if err != nil { + return nil, err + } + + // Emit a defund event. 
+ _ = ctx.EventManager().EmitTypedEvent(&types.EventDefundPool{ + PoolId: msg.Id, + Address: lowestFunder.Address, + Amount: lowestFunder.Amount, + }) + + // Remove from pool + pool.RemoveFunder(lowestFunder.Address) + } else { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrFundsTooLow.Error(), lowestFunder.Amount) + } + } + } + + // User is allowed to fund + pool.AddAmountToFunder(msg.Creator, msg.Amount) + + if err := util.TransferFromAddressToModule(k.bankKeeper, ctx, msg.Creator, types.ModuleName, msg.Amount); err != nil { + return nil, err + } + + _ = ctx.EventManager().EmitTypedEvent(&types.EventFundPool{ + PoolId: msg.Id, + Address: msg.Creator, + Amount: msg.Amount, + }) + + k.SetPool(ctx, pool) + + return &types.MsgFundPoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_fund_pool_test.go b/x/pool/keeper/msg_server_fund_pool_test.go new file mode 100644 index 00000000..6cc6fb8f --- /dev/null +++ b/x/pool/keeper/msg_server_fund_pool_test.go @@ -0,0 +1,278 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_fund_pool.go + +* Create funder by funding a pool with 100 $KYVE +* Fund additional 50 $KYVE to an existing funder with 100 $KYVE +* Try to fund more $KYVE than available in balance +* Fund with a new funder less $KYVE than the existing one +* Fund with a new funder more $KYVE than the existing one +* Try to fund less $KYVE than the lowest funder with full funding slots +* Try to fund more $KYVE than the lowest funder with full funding slots + +*/ + +var _ = Describe("msg_server_fund_pool.go", Ordered, func() { + s := i.NewCleanChain() + + initialBalance := s.GetBalanceFromAddress(i.ALICE) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create clean pool for every test case + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Create funder by funding a pool with 100 $KYVE", func() { + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(100 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.TotalFunds).To(Equal(100 * i.KYVE)) + + funderAmount := pool.GetFunderAmount(i.ALICE) + + Expect(funderAmount).To(Equal(100 * i.KYVE)) + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(100 * i.KYVE)) + }) + + It("Fund additional 50 $KYVE to an existing funder with 100 $KYVE", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(150 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(1)) + Expect(pool.TotalFunds).To(Equal(150 * i.KYVE)) + + funderAmount 
:= pool.GetFunderAmount(i.ALICE) + + Expect(funderAmount).To(Equal(150 * i.KYVE)) + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(150 * i.KYVE)) + }) + + It("Try to fund more $KYVE than available in balance", func() { + // ACT + currentBalance := s.GetBalanceFromAddress(i.ALICE) + + s.RunTxPoolError(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: currentBalance + 1, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(BeZero()) + + Expect(pool.Funders).To(BeEmpty()) + Expect(pool.TotalFunds).To(BeZero()) + + Expect(pool.GetFunderAmount(i.ALICE)).To(Equal(0 * i.KYVE)) + Expect(pool.GetLowestFunder().Address).To(Equal("")) + Expect(pool.GetLowestFunder().Amount).To(Equal(0 * i.KYVE)) + }) + + It("Fund with a new funder less $KYVE than the existing one", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.BOB) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(50 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(2)) + Expect(pool.TotalFunds).To(Equal(150 * i.KYVE)) + + funderAmount := pool.GetFunderAmount(i.BOB) + + Expect(funderAmount).To(Equal(50 * i.KYVE)) + Expect(pool.GetLowestFunder().Address).To(Equal(i.BOB)) + Expect(pool.GetLowestFunder().Amount).To(Equal(50 * i.KYVE)) + }) + + It("Fund with a new funder more $KYVE than the existing one", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.BOB, + Id: 0, + Amount: 200 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.BOB) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(initialBalance - balanceAfter).To(Equal(200 * i.KYVE)) + + Expect(pool.Funders).To(HaveLen(2)) + Expect(pool.TotalFunds).To(Equal(300 * i.KYVE)) + + funderAmount := pool.GetFunderAmount(i.BOB) + Expect(funderAmount).To(Equal(200 * i.KYVE)) + + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(100 * i.KYVE)) + }) + + It("Try to fund less $KYVE than the lowest funder with full funding slots", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + for a := 0; a < 49; a++ { + // fill remaining funding slots + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[a], + Id: 0, + Amount: 1000 * i.KYVE, + }) + } + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(50)) + Expect(pool.TotalFunds).To(Equal(49_100 * i.KYVE)) + + Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(100 * i.KYVE)) + + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + + Expect(initialBalance - balanceAfter).To(Equal(100 * i.KYVE)) + + // ACT + s.RunTxPoolError(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[49], + Id: 0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + Expect(pool.Funders).To(HaveLen(50)) + Expect(pool.TotalFunds).To(Equal(49_100 * i.KYVE)) + + Expect(pool.GetFunderAmount(i.DUMMY[49])).To(BeZero()) + 
Expect(pool.GetLowestFunder().Address).To(Equal(i.ALICE)) + Expect(pool.GetLowestFunder().Amount).To(Equal(100 * i.KYVE)) + }) + + It("Fund more $KYVE than the lowest funder with full funding slots", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + for a := 0; a < 49; a++ { + // fill remaining funding slots + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[a], + Id: 0, + Amount: 1000 * i.KYVE, + }) + } + + // ACT + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.DUMMY[49], + Id: 0, + Amount: 200 * i.KYVE, + }) + + // ASSERT + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(pool.Funders).To(HaveLen(50)) + Expect(pool.TotalFunds).To(Equal(49_200 * i.KYVE)) + + Expect(pool.GetFunderAmount(i.DUMMY[49])).To(Equal(200 * i.KYVE)) + Expect(pool.GetLowestFunder().Address).To(Equal(i.DUMMY[49])) + Expect(pool.GetLowestFunder().Amount).To(Equal(200 * i.KYVE)) + + balanceAfter := s.GetBalanceFromAddress(i.ALICE) + Expect(initialBalance - balanceAfter).To(BeZero()) + }) +}) diff --git a/x/pool/keeper/msg_server_schedule_runtime_upgrade.go b/x/pool/keeper/msg_server_schedule_runtime_upgrade.go new file mode 100644 index 00000000..d3001b9e --- /dev/null +++ b/x/pool/keeper/msg_server_schedule_runtime_upgrade.go @@ -0,0 +1,56 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +func (k msgServer) ScheduleRuntimeUpgrade(goCtx context.Context, req *types.MsgScheduleRuntimeUpgrade) (*types.MsgScheduleRuntimeUpgradeResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + if req.Version == "" || req.Binaries == "" { + return nil, types.ErrInvalidArgs + } + + var scheduledAt uint64 + ctx := sdk.UnwrapSDKContext(goCtx) + + // if upgrade was scheduled in the past we reschedule it to now + if req.ScheduledAt < uint64(ctx.BlockTime().Unix()) { + scheduledAt = uint64(ctx.BlockTime().Unix()) + } else { + scheduledAt = req.ScheduledAt + } + + for _, pool := range k.GetAllPools(ctx) { + // only schedule upgrade if the runtime matches + if pool.Runtime != req.Runtime { + continue + } + + // only schedule upgrade if there is no upgrade already + if pool.UpgradePlan.ScheduledAt != 0 { + continue + } + + pool.UpgradePlan = &types.UpgradePlan{ + Version: req.Version, + Binaries: req.Binaries, + ScheduledAt: scheduledAt, + Duration: req.Duration, + } + + k.SetPool(ctx, pool) + } + + return &types.MsgScheduleRuntimeUpgradeResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_schedule_runtime_upgrade_test.go b/x/pool/keeper/msg_server_schedule_runtime_upgrade_test.go new file mode 100644 index 00000000..cd3f0386 --- /dev/null +++ b/x/pool/keeper/msg_server_schedule_runtime_upgrade_test.go @@ -0,0 +1,291 @@ +package keeper_test + +import ( + "time" + + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_schedule_runtime_upgrade.go + +* Invalid authority (transaction). +* Invalid authority (proposal). 
+* Schedule runtime upgrade with no version +* Schedule runtime upgrade with no binaries +* Schedule runtime upgrade in the past +* Schedule runtime upgrade in the future +* Schedule runtime upgrade while another one is ongoing + +*/ + +var _ = Describe("msg_server_schedule_runtime_upgrade.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Runtime: "@kyve/test", + Protocol: &types.Protocol{ + Version: "0.0.0", + Binaries: "{\"linux\":\"test\"}", + LastUpgrade: 0, + }, + UpgradePlan: &types.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction).", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: i.DUMMY[0], + Runtime: "@kyve/test", + Version: "1.0.0", + ScheduledAt: uint64(time.Now().Unix()), + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal).", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: i.DUMMY[0], + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()), + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Schedule runtime upgrade with no version", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()), + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + }) + + It("Schedule runtime upgrade with no binaries", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()), + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + }) + + It("Schedule runtime upgrade in the past", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()) - 7*24*3600, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := 
s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(s.Ctx().BlockTime().Unix()), + Duration: 60, + })) + }) + + It("Schedule runtime upgrade in the future", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + Duration: 60, + })) + }) + + It("Schedule runtime upgrade while another one is ongoing", func() { + // ARRANGE + msg := &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "1.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + Duration: 60, + })) + + // ACT + msg = &types.MsgScheduleRuntimeUpgrade{ + Authority: gov, + Runtime: "@kyve/test", + Version: "2.0.0", + Binaries: "{}", + Duration: 60, + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + } + + p, v = BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr = s.RunTx(&p) + _, voteErr = s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ = s.App().GovKeeper.GetProposal(s.Ctx(), 2) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ = s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool.UpgradePlan).To(Equal(&types.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: uint64(time.Now().Unix()) + 7*24*3600, + Duration: 60, + })) + }) +}) diff --git a/x/pool/keeper/msg_server_update_pool.go b/x/pool/keeper/msg_server_update_pool.go new file mode 100644 index 00000000..3803bd5a --- /dev/null +++ b/x/pool/keeper/msg_server_update_pool.go @@ -0,0 +1,79 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + errorsTypes "github.com/cosmos/cosmos-sdk/types/errors" + + "github.com/KYVENetwork/chain/x/pool/types" + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" +) + +type Update struct { + Name *string + Runtime *string + Logo *string + Config *string + UploadInterval *uint64 + OperatingCost *uint64 + MinDelegation *uint64 + MaxBundleSize *uint64 + StorageProviderId *uint32 + CompressionId *uint32 +} + 
+func (k msgServer) UpdatePool(goCtx context.Context, req *types.MsgUpdatePool) (*types.MsgUpdatePoolResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + pool, found := k.GetPool(ctx, req.Id) + if !found { + return nil, errors.Wrapf(errorsTypes.ErrNotFound, types.ErrPoolNotFound.Error(), req.Id) + } + + var update Update + if err := json.Unmarshal([]byte(req.Payload), &update); err != nil { + return nil, err + } + + if update.Name != nil { + pool.Name = *update.Name + } + if update.Runtime != nil { + pool.Runtime = *update.Runtime + } + if update.Logo != nil { + pool.Logo = *update.Logo + } + if update.Config != nil { + pool.Config = *update.Config + } + if update.UploadInterval != nil { + pool.UploadInterval = *update.UploadInterval + } + if update.OperatingCost != nil { + pool.OperatingCost = *update.OperatingCost + } + if update.MinDelegation != nil { + pool.MinDelegation = *update.MinDelegation + } + if update.MaxBundleSize != nil { + pool.MaxBundleSize = *update.MaxBundleSize + } + if update.StorageProviderId != nil { + pool.CurrentStorageProviderId = *update.StorageProviderId + } + if update.CompressionId != nil { + pool.CurrentCompressionId = *update.CompressionId + } + + k.SetPool(ctx, pool) + + return &types.MsgUpdatePoolResponse{}, nil +} diff --git a/x/pool/keeper/msg_server_update_pool_test.go b/x/pool/keeper/msg_server_update_pool_test.go new file mode 100644 index 00000000..7833de3d --- /dev/null +++ b/x/pool/keeper/msg_server_update_pool_test.go @@ -0,0 +1,313 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Pool + "github.com/KYVENetwork/chain/x/pool/types" +) + +/* + +TEST CASES - msg_server_update_pool.go + +* Invalid authority (transaction) +* Invalid authority (proposal) +* Update first pool +* Update first pool partially +* Update another pool +* Update pool with invalid json payload + +*/ + +var _ = Describe("msg_server_update_pool.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Name: "", + Runtime: "", + Logo: "", + Config: "", + UploadInterval: 0, + OperatingCost: 0, + MinDelegation: 0, + MaxBundleSize: 0, + CurrentStorageProviderId: 0, + CurrentCompressionId: 0, + Protocol: &types.Protocol{}, + UpgradePlan: &types.UpgradePlan{}, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgUpdatePool{ + Authority: i.DUMMY[0], + Id: 0, + Payload: "{\"Name\":\"TestPool\",\"Runtime\":\"@kyve/test\",\"Logo\":\"ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU\",\"Config\":\"ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0\",\"StartKey\":\"0\",\"UploadInterval\":60,\"OperatingCost\":10000,\"MinDelegation\":\"100000000000\",\"MaxBundleSize\":100,\"Version\":\"0.0.0\",\"Binaries\":\"{}\",\"StorageProviderId\":2,\"CompressionId\":1}", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority (proposal)", func() { + // ARRANGE + msg := &types.MsgUpdatePool{ + Authority: i.DUMMY[0], + Id: 0, + Payload: "{\"Name\":\"TestPool\",\"Runtime\":\"@kyve/test\",\"Logo\":\"ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU\",\"Config\":\"ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0\",\"StartKey\":\"0\",\"UploadInterval\":60,\"OperatingCost\":10000,\"MinDelegation\":\"100000000000\",\"MaxBundleSize\":100,\"Version\":\"0.0.0\",\"Binaries\":\"{}\",\"StorageProviderId\":2,\"CompressionId\":1}", + } + + proposal, _ := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, err := s.RunTx(&proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Update first pool", func() { + // ARRANGE + msg := &types.MsgUpdatePool{ + Authority: gov, + Id: 0, + Payload: "{\"Name\":\"TestPool\",\"Runtime\":\"@kyve/test\",\"Logo\":\"ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU\",\"Config\":\"ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0\",\"StartKey\":\"0\",\"UploadInterval\":60,\"OperatingCost\":10000,\"MinDelegation\":100000000000,\"MaxBundleSize\":100,\"Version\":\"0.0.0\",\"Binaries\":\"{}\",\"StorageProviderId\":2,\"CompressionId\":1}", + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool).To(Equal(types.Pool{ + Id: 0, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "", + CurrentKey: "", + CurrentSummary: "", + CurrentIndex: 0, + TotalBundles: 
0, + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Disabled: false, + Funders: nil, + TotalFunds: 0, + Protocol: &types.Protocol{ + Version: "", + Binaries: "", + LastUpgrade: 0, + }, + UpgradePlan: &types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + }, + CurrentStorageProviderId: 2, + CurrentCompressionId: 1, + })) + }) + + It("Update first pool partially", func() { + // ARRANGE + msg := &types.MsgUpdatePool{ + Authority: gov, + Id: 0, + Payload: "{\"Name\":\"TestPool\",\"Runtime\":\"@kyve/test\"}", + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + Expect(pool).To(Equal(types.Pool{ + Id: 0, + Name: "TestPool", + Runtime: "@kyve/test", + Logo: "", + Config: "", + StartKey: "", + CurrentKey: "", + CurrentSummary: "", + CurrentIndex: 0, + TotalBundles: 0, + UploadInterval: 0, + OperatingCost: 0, + MinDelegation: 0, + MaxBundleSize: 0, + Disabled: false, + Funders: nil, + TotalFunds: 0, + Protocol: &types.Protocol{ + Version: "", + Binaries: "", + LastUpgrade: 0, + }, + UpgradePlan: &types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + }, + CurrentStorageProviderId: 0, + CurrentCompressionId: 0, + })) + }) + + It("Update another pool", func() { + // ARRANGE + s.App().PoolKeeper.AppendPool(s.Ctx(), types.Pool{ + Name: "", + Runtime: "", + Logo: "", + Config: "", + UploadInterval: 0, + OperatingCost: 0, + MinDelegation: 0, + MaxBundleSize: 0, + CurrentStorageProviderId: 0, + CurrentCompressionId: 0, + Protocol: &types.Protocol{}, + UpgradePlan: &types.UpgradePlan{}, + }) + + // ACT + msg := &types.MsgUpdatePool{ + Authority: gov, + Id: 1, + Payload: "{\"Name\":\"TestPool2\",\"Runtime\":\"@kyve/test\",\"Logo\":\"ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU\",\"Config\":\"ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0\",\"StartKey\":\"0\",\"UploadInterval\":60,\"OperatingCost\":10000,\"MinDelegation\":100000000000,\"MaxBundleSize\":100,\"Version\":\"0.0.0\",\"Binaries\":\"{}\",\"StorageProviderId\":2,\"CompressionId\":1}", + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusPassed)) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 1) + Expect(pool).To(Equal(types.Pool{ + Id: 1, + Name: "TestPool2", + Runtime: "@kyve/test", + Logo: "ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + Config: "ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0", + StartKey: "", + CurrentKey: "", + CurrentSummary: "", + CurrentIndex: 0, + TotalBundles: 0, + UploadInterval: 60, + OperatingCost: 10000, + MinDelegation: 100 * i.KYVE, + MaxBundleSize: 100, + Disabled: false, + Funders: nil, + TotalFunds: 0, + Protocol: &types.Protocol{ + Version: "", + Binaries: "", + LastUpgrade: 0, + }, + UpgradePlan: &types.UpgradePlan{ + Version: "", + Binaries: "", + ScheduledAt: 0, + Duration: 0, + 
}, + CurrentStorageProviderId: 2, + CurrentCompressionId: 1, + })) + }) + + It("Update pool with invalid json payload", func() { + // ARRANGE + msg := &types.MsgUpdatePool{ + Authority: gov, + Id: 1, + Payload: "invalid_json_payload\",\"Runtime\":\"@kyve/test\",\"Logo\":\"ar://Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU\",\"Config\":\"ar://DgdB-2hLrxjhyEEbCML__dgZN5_uS7T6Z5XDkaFh3P0\",\"StartKey\":\"0\",\"UploadInterval\":60,\"OperatingCost\":10000,\"MinDelegation\":100000000000,\"MaxBundleSize\":100,\"Version\":\"0.0.0\",\"Binaries\":\"{}\",\"StorageProviderId\":2,\"CompressionId\":1}", + } + + p, v := BuildGovernanceTxs(s, []sdk.Msg{msg}) + + // ACT + _, submitErr := s.RunTx(&p) + _, voteErr := s.RunTx(&v) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + + Expect(submitErr).To(Not(HaveOccurred())) + Expect(voteErr).To(Not(HaveOccurred())) + + Expect(proposal.Status).To(Equal(govV1Types.StatusFailed)) + + pool, found := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + + Expect(found).To(BeTrue()) + Expect(pool.Name).To(BeEmpty()) + }) +}) diff --git a/x/pool/module.go b/x/pool/module.go new file mode 100644 index 00000000..bb003ede --- /dev/null +++ b/x/pool/module.go @@ -0,0 +1,157 @@ +package pool + +import ( + "encoding/json" + "fmt" + // this line is used by starport scaffolding # 1 + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/KYVENetwork/chain/x/pool/client/cli" + "github.com/KYVENetwork/chain/x/pool/keeper" + "github.com/KYVENetwork/chain/x/pool/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. 
The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(_ client.Context, _ *runtime.ServeMux) {} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module. The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return nil +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper bankKeeper.Keeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper bankKeeper.Keeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. 
+func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage {
+	genState := ExportGenesis(ctx, am.keeper)
+	return cdc.MustMarshalJSON(genState)
+}
+
+// ConsensusVersion is a sequence number for state-breaking changes of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
+// BeginBlock contains the logic that is automatically triggered at the beginning of each block
+func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {}
+
+// EndBlock contains the logic that is automatically triggered at the end of each block
+func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate {
+	am.keeper.HandlePoolUpgrades(ctx)
+	return []abci.ValidatorUpdate{}
+}
diff --git a/x/pool/spec/01_concepts.md b/x/pool/spec/01_concepts.md
new file mode 100644
index 00000000..0c7dd605
--- /dev/null
+++ b/x/pool/spec/01_concepts.md
@@ -0,0 +1,28 @@
+
+
+# Concepts
+
+This module contains the building block for validating and archiving
+data with KYVE: the storage pools. KYVE allows multiple pools to exist
+at once, all validating different kinds of data sources. One pool could,
+for example, be responsible for validating Bitcoin data, another
+pool for Ethereum data. Each staker can join multiple pools at once
+to validate more data and in return earn more rewards in $KYVE.
+
+## Storage Pool
+
+A storage pool is responsible for validating and archiving
+a single type of data. As of now each pool can have up to 50 validators. To be
+allowed to validate data in a pool, those validators must have a cumulative stake
+greater than or equal to the specified minimum stake.
+
+## Keeping Pools Funded
+
+Funders are special actors who provide liquidity to a pool and effectively pay
+for the rewards the validators earn for their work. Funders would usually be
+stakeholders of the data that is being archived and therefore have a strong interest
+in the continued archiving of the data. Once a valid bundle is produced and the
+reward is paid out, the pool module deducts the funds equally from each funder
+in order to guarantee a steady pool economy.
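+
+The snippet below is a simplified, illustrative sketch of this equal deduction.
+The `funder` struct and the `chargeFundersEqually` helper exist only for this
+example; the actual logic lives in the keeper's `ChargeFundersOfPool` function
+(see the exported functions spec), which additionally transfers the leftover
+funds of removed funders to the treasury.
+
+```go
+// funder mirrors the Funder object from the pool state: an address plus the
+// amount (in ukyve) it has still funded the pool with.
+type funder struct {
+	Address string
+	Amount  uint64
+}
+
+// chargeFundersEqually splits total equally between all funders. Funders who
+// cannot afford their share are removed; their leftover amount is reported as
+// slashed (in the real module it is sent to the treasury and an
+// EventPoolFundsSlashed event is emitted).
+func chargeFundersEqually(funders []funder, total uint64) (remaining []funder, charged, slashed uint64) {
+	if len(funders) == 0 {
+		return nil, 0, 0
+	}
+	share := total / uint64(len(funders))
+	for _, f := range funders {
+		if f.Amount < share {
+			slashed += f.Amount // funder is kicked out of the pool
+			continue
+		}
+		f.Amount -= share
+		charged += share
+		remaining = append(remaining, f)
+	}
+	return remaining, charged, slashed
+}
+```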
diff --git a/x/pool/spec/02_state.md b/x/pool/spec/02_state.md
new file mode 100644
index 00000000..2925d314
--- /dev/null
+++ b/x/pool/spec/02_state.md
@@ -0,0 +1,127 @@
+
+
+# State
+
+The module is mainly responsible for holding the pools' state
+and keeping track of the pool funders.
+
+## Pools
+The pool object is rather large and holds multiple sub-objects grouped
+by functionality.
+
+### Pool
+Pool is the main type and holds everything a pool needs to know, including the
+sub-objects listed below.
+
+- Pool: `0x01 | PoolId -> ProtocolBuffer(pool)`
+
+```protobuf
+syntax = "proto3";
+
+enum PoolStatus {
+  option (gogoproto.goproto_enum_prefix) = false;
+
+  // POOL_STATUS_UNSPECIFIED ...
+  POOL_STATUS_UNSPECIFIED = 0;
+  // POOL_STATUS_ACTIVE ...
+  POOL_STATUS_ACTIVE = 1;
+  // POOL_STATUS_DISABLED ...
+  POOL_STATUS_DISABLED = 2;
+  // POOL_STATUS_NO_FUNDS ...
+  POOL_STATUS_NO_FUNDS = 3;
+  // POOL_STATUS_NOT_ENOUGH_DELEGATION ...
+  POOL_STATUS_NOT_ENOUGH_DELEGATION = 4;
+  // POOL_STATUS_UPGRADING ...
+  POOL_STATUS_UPGRADING = 5;
+}
+
+message Protocol {
+  // version holds the current software version tag of the pool binaries
+  string version = 1;
+  // binaries is a stringified json object which holds binaries in the
+  // current version for multiple platforms and architectures
+  string binaries = 2;
+  // last_upgrade is the unix time the pool was upgraded the last time
+  uint64 last_upgrade = 3;
+}
+
+message UpgradePlan {
+  // version is the new software version tag of the upgrade
+  string version = 1;
+  // binaries is the new stringified json object which holds binaries in the
+  // upgrade version for multiple platforms and architectures
+  string binaries = 2;
+  // scheduled_at is the unix time the upgrade is supposed to be done
+  uint64 scheduled_at = 3;
+  // duration is the time in seconds how long the pool should halt
+  // during the upgrade to give all validators a chance of switching
+  // to the new binaries
+  uint64 duration = 4;
+}
+
+message Funder {
+  // address is the address of the funder
+  string address = 1;
+  // amount is the current amount of funds in ukyve the funder has
+  // still funded the pool with
+  uint64 amount = 2;
+}
+```
+
+```protobuf
+syntax = "proto3";
+
+message Pool {
+  // id ...
+  uint64 id = 1;
+  // name ...
+  string name = 2;
+  // runtime ...
+  string runtime = 3;
+  // logo ...
+  string logo = 4;
+  // config ...
+  string config = 5;
+
+  // start_key ...
+  string start_key = 6;
+  // current_key ...
+  string current_key = 7;
+  // current_summary ...
+  string current_summary = 8;
+  // current_index ...
+  uint64 current_index = 9;
+
+  // total_bundles ...
+  uint64 total_bundles = 10;
+
+  // upload_interval ...
+  uint64 upload_interval = 11;
+  // operating_cost ...
+  uint64 operating_cost = 12;
+  // min_delegation ...
+  uint64 min_delegation = 13;
+  // max_bundle_size ...
+  uint64 max_bundle_size = 14;
+
+  // disabled ...
+  bool disabled = 15;
+
+  // funders ...
+  repeated Funder funders = 16;
+  // total_funds ...
+  uint64 total_funds = 17;
+
+  // protocol ...
+  Protocol protocol = 18;
+  // upgrade_plan ...
+  UpgradePlan upgrade_plan = 19;
+
+  // storage_provider_id ...
+  uint32 current_storage_provider_id = 20;
+  // compression_id ...
+  uint32 current_compression_id = 21;
+}
+```
diff --git a/x/pool/spec/03_messages.md b/x/pool/spec/03_messages.md
new file mode 100644
index 00000000..62cf8297
--- /dev/null
+++ b/x/pool/spec/03_messages.md
@@ -0,0 +1,65 @@
+
+
+# Messages
+
+## MsgFundPool
+
+With this transaction, stakeholders of a pool can provide funds to the storage pool so it can continue
+validating and archiving data. Funding a pool does not earn any rewards; quite the opposite:
+by funding a pool the funders pay for the rewards the validators receive. If all funder
+slots are occupied, a user needs to fund more than the current lowest funder in order for
+the transaction to succeed.
+
+## MsgDefundPool
+
+A funder can withdraw their funds from a pool again at any time. Funds can also be defunded
+partially; if the full amount is defunded, the funder gets removed from the pool completely.
+
+## MsgCreatePool
+
+MsgCreatePool is a gov transaction and can only be called by the governance authority. To submit this transaction
+someone has to create a MsgCreatePool governance proposal.
+
+This will create a new storage pool in the KYVE network which other participants can join.
+
+## MsgUpdatePool
+
+MsgUpdatePool is a gov transaction and can only be called by the governance authority. To submit this transaction
+someone has to create a MsgUpdatePool governance proposal.
+
+This will update an existing storage pool based on the given parameters.
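+
+For illustration, a minimal sketch of how such a message could be constructed is shown
+below. The helper function and the payload values are made up for this example; the
+field names follow the message used in the integration tests, and the payload is a
+JSON object containing only the pool fields that should change.
+
+```go
+import (
+	"github.com/KYVENetwork/chain/x/pool/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// buildUpdatePoolMsg is an illustrative helper (not part of the module) that
+// builds the MsgUpdatePool which is then wrapped into a governance proposal
+// and voted on like any other proposal.
+func buildUpdatePoolMsg(govAuthority string, poolId uint64) sdk.Msg {
+	return &types.MsgUpdatePool{
+		Authority: govAuthority, // must be the address of the x/gov module account
+		Id:        poolId,
+		Payload:   `{"UploadInterval":120,"MaxBundleSize":100}`,
+	}
+}
+```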
+
+## MsgDisablePool
+
+MsgDisablePool is a gov transaction and can only be called by the governance authority. To submit this transaction
+someone has to create a MsgDisablePool governance proposal.
+
+This will disable a currently active pool. Once a pool is disabled it will not use any funds and therefore will not
+validate or archive any data.
+
+## MsgEnablePool
+
+MsgEnablePool is a gov transaction and can only be called by the governance authority. To submit this transaction
+someone has to create a MsgEnablePool governance proposal.
+
+This will enable a currently disabled pool. Once a pool is enabled it can continue to validate and archive data again.
+
+## MsgScheduleRuntimeUpgrade
+
+MsgScheduleRuntimeUpgrade is a gov transaction and can only be called by the governance authority. To submit
+this transaction someone has to create a MsgScheduleRuntimeUpgrade governance proposal.
+
+This will schedule an upgrade for the specified runtime. A runtime upgrade contains an upgrade version and the
+associated upgrade binaries. Once the scheduled upgrade time is reached, the upgrade is performed with the
+specified upgrade duration.
+
+## MsgCancelRuntimeUpgrade
+
+MsgCancelRuntimeUpgrade is a gov transaction and can only be called by the governance authority. To submit
+this transaction someone has to create a MsgCancelRuntimeUpgrade governance proposal.
+
+This will cancel a scheduled runtime upgrade as long as it has not been performed yet. Once the upgrade has been
+applied it can no longer be cancelled, but it is still possible to downgrade a runtime by simply "upgrading" to the
+prior version.
diff --git a/x/pool/spec/04_end_block.md b/x/pool/spec/04_end_block.md
new file mode 100644
index 00000000..45ed0e12
--- /dev/null
+++ b/x/pool/spec/04_end_block.md
@@ -0,0 +1,10 @@
+
+
+# EndBlock
+
+EndBlock is used to determine whether a scheduled runtime upgrade needs to be performed, based on the
+provided upgrade time. If an upgrade is scheduled and the scheduled time is reached, _end_block_ copies
+the upgrade details over to the actual pool version and pauses the pool for the specified duration. Once the
+duration has passed, _end_block_ unpauses the pool again, finishing the runtime upgrade.
\ No newline at end of file
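+
+The sketch below is a simplified, illustrative model of this flow; it is not the
+actual keeper code (the real logic runs in `HandlePoolUpgrades`, which the module's
+`EndBlock` calls every block). The local structs and the `handleUpgrade` helper are
+made up for this example and only mirror the `Protocol` and `UpgradePlan` objects
+from the state spec.
+
+```go
+type protocol struct {
+	Version     string
+	Binaries    string
+	LastUpgrade uint64 // unix time of the last applied upgrade
+}
+
+type upgradePlan struct {
+	Version     string
+	Binaries    string
+	ScheduledAt uint64 // unix time the upgrade should happen
+	Duration    uint64 // seconds the pool halts during the upgrade
+}
+
+// handleUpgrade applies a due upgrade plan to the live protocol version and
+// reports whether the pool is currently paused (POOL_STATUS_UPGRADING).
+func handleUpgrade(p *protocol, plan *upgradePlan, now uint64) (upgrading bool) {
+	if plan.ScheduledAt == 0 || now < plan.ScheduledAt {
+		return false // nothing scheduled, or the upgrade is not due yet
+	}
+	if p.Version != plan.Version {
+		// scheduled time reached: copy the upgrade details over to the pool version
+		p.Version = plan.Version
+		p.Binaries = plan.Binaries
+		p.LastUpgrade = now
+	}
+	if now < plan.ScheduledAt+plan.Duration {
+		return true // pool stays paused for the specified duration
+	}
+	*plan = upgradePlan{} // upgrade window over: clear the plan and unpause
+	return false
+}
+```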
diff --git a/x/pool/spec/05_params.md b/x/pool/spec/05_params.md
new file mode 100644
index 00000000..563fc3ab
--- /dev/null
+++ b/x/pool/spec/05_params.md
@@ -0,0 +1,7 @@
+
+
+# Parameters
+
+The pool module has no parameters.
diff --git a/x/pool/spec/06_events.md b/x/pool/spec/06_events.md
new file mode 100644
index 00000000..5073b52b
--- /dev/null
+++ b/x/pool/spec/06_events.md
@@ -0,0 +1,141 @@
+
+
+# Events
+
+The pool module emits the following events:
+
+## EventFundPool
+
+EventFundPool indicates that someone has funded a storage pool with a certain amount.
+
+```protobuf
+syntax = "proto3";
+
+message EventFundPool {
+  // pool_id is the unique ID of the pool.
+  uint64 pool_id = 1;
+  // address is the account address of the pool funder.
+  string address = 2;
+  // amount is the amount in ukyve the funder has funded
+  uint64 amount = 3;
+}
+```
+
+It gets emitted by the following actions:
+
+- MsgFundPool
+
+## EventDefundPool
+
+EventDefundPool indicates that someone has defunded a storage pool with a certain amount.
+
+```protobuf
+syntax = "proto3";
+
+message EventDefundPool {
+  // pool_id is the unique ID of the pool.
+  uint64 pool_id = 1;
+  // address is the account address of the pool funder.
+  string address = 2;
+  // amount is the amount in ukyve the funder has defunded
+  uint64 amount = 3;
+}
+```
+
+It gets emitted by the following actions:
+
+- MsgDefundPool
+
+## EventPoolFundsSlashed
+
+EventPoolFundsSlashed indicates that a funder no longer had enough $KYVE in their funding account to pay for the
+validator rewards. In this case the remaining funds get transferred to the treasury and the funder gets
+removed from the pool.
+
+```protobuf
+syntax = "proto3";
+
+message EventPoolFundsSlashed {
+  // pool_id is the unique ID of the pool.
+  uint64 pool_id = 1;
+  // address is the account address of the pool funder.
+  string address = 2;
+  // amount is the amount in ukyve the validator has lost due to the slash
+  uint64 amount = 3;
+}
+```
+
+It gets emitted by the following actions:
+
+- MsgSubmitBundleProposal
+
+## EventPoolOutOfFunds
+
+EventPoolOutOfFunds indicates that a pool has run out of funds and therefore pauses. If that happens, someone
+has to fund the pool again; the pool then automatically continues.
+
+```protobuf
+syntax = "proto3";
+
+message EventPoolOutOfFunds {
+  // pool_id is the unique ID of the pool.
+  uint64 pool_id = 1;
+}
+```
+
+It gets emitted by the following actions:
+
+- MsgSubmitBundleProposal
+
+## EventCreatePool
+
+EventCreatePool indicates that a new storage pool has been created and is ready to validate and archive data.
+
+```protobuf
+syntax = "proto3";
+
+message EventCreatePool {
+  // id is the unique ID of the pool.
+  uint64 id = 1;
+  // name is the human readable name of the pool
+  string name = 2;
+  // runtime is the runtime name of the pool
+  string runtime = 3;
+  // logo is the logo url of the pool
+  string logo = 4;
+  // config is either a json stringified config or an
+  // external link pointing to the config
+  string config = 5;
+  // start_key is the first key the pool should start
+  // indexing
+  string start_key = 6;
+  // upload_interval is the interval the pool should validate
+  // bundles with
+  uint64 upload_interval = 7;
+  // operating_cost is the fixed cost which gets paid out
+  // to every successful uploader
+  uint64 operating_cost = 8;
+  // min_delegation is the minimum amount of $KYVE the pool has
+  // to have in order to produce bundles
+  uint64 min_delegation = 9;
+  // max_bundle_size is the max size a data bundle can have
+  // (amount of data items)
+  uint64 max_bundle_size = 10;
+  // version is the current version of the protocol nodes
+  string version = 11;
+  // binaries points to the current binaries of the protocol node
+  string binaries = 12;
+  // storage_provider_id is the unique id of the storage provider
+  // the pool is archiving the data on
+  uint32 storage_provider_id = 13;
+  // compression_id is the unique id of the compression type the bundles
+  // get compressed with
+  uint32 compression_id = 14;
+}
+```
+
+It gets emitted by the following actions:
+
+- MsgCreatePool
diff --git a/x/pool/spec/07_exported.md b/x/pool/spec/07_exported.md
new file mode 100644
index 00000000..bc3b75d9
--- /dev/null
+++ b/x/pool/spec/07_exported.md
@@ -0,0 +1,37 @@
+
+
+# Exported
+
+The `x/pool` module exports the following functions, which can be used
+outside the module.
+
+```go
+type PoolKeeper interface {
+
+	// AssertPoolExists returns nil if the pool exists and types.ErrPoolNotFound if it does not.
+ AssertPoolExists(ctx sdk.Context, poolId uint64) error + + // GetPoolWithError returns a pool by its poolId, if the pool does not exist, + // a types.ErrPoolNotFound error is returned + GetPoolWithError(ctx sdk.Context, poolId uint64) (pooltypes.Pool, error) + + // TODO(@troy,@max) double check bundles module ( GetPoolWithError and GetPool) + GetPool(ctx sdk.Context, id uint64) (val pooltypes.Pool, found bool) + + // IncrementBundleInformation updates the latest finalized bundle of a pool + IncrementBundleInformation(ctx sdk.Context, poolId uint64, currentHeight uint64, currentKey string, currentValue string) + + GetAllPools(ctx sdk.Context) (list []pooltypes.Pool) + + // ChargeFundersOfPool equally splits the amount between all funders and removes + // the appropriate amount from each funder. + // All funders who can't afford the amount, are kicked out. + // Their remaining amount is transferred to the Treasury. + // The function throws an error if pool ran out of funds. + // This method does not transfer any funds. The bundles-module + // is responsible for transferring the rewards out of the module. + ChargeFundersOfPool(ctx sdk.Context, poolId uint64, amount uint64) error +} +``` diff --git a/x/pool/types/codec.go b/x/pool/types/codec.go new file mode 100644 index 00000000..5f388232 --- /dev/null +++ b/x/pool/types/codec.go @@ -0,0 +1,26 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgFundPool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgDefundPool{}) + + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgCreatePool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdatePool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgDisablePool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgEnablePool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgScheduleRuntimeUpgrade{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgCancelRuntimeUpgrade{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/pool/types/errors.go b/x/pool/types/errors.go new file mode 100644 index 00000000..ef0e827b --- /dev/null +++ b/x/pool/types/errors.go @@ -0,0 +1,15 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ErrPoolNotFound = sdkerrors.Register(ModuleName, 1100, "pool with id %v does not exist") + +// funding errors +var ( + ErrFundsTooLow = sdkerrors.Register(ModuleName, 1101, "minimum funding amount of %vkyve not reached") + ErrDefundTooHigh = sdkerrors.Register(ModuleName, 1102, "maximum defunding amount of %vkyve surpassed") + ErrInvalidJson = sdkerrors.Register(ModuleName, 1103, "invalid json object: %v") + ErrInvalidArgs = sdkerrors.Register(ModuleName, 1104, "invalid args") +) diff --git a/x/pool/types/events.pb.go b/x/pool/types/events.pb.go new file mode 100644 index 00000000..b95eafe0 --- /dev/null +++ b/x/pool/types/events.pb.go @@ -0,0 +1,1803 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kyve/pool/v1beta1/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventCreatePool ... +// emitted_by: EndBlock(gov) +type EventCreatePool struct { + // id is the unique ID of the pool. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // name is the human readable name of the pool + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // runtime is the runtime name of the pool + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + // logo is the logo url of the pool + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` + // config is either a json stringified config or an + // external link pointing to the config + Config string `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` + // start_key is the first key the pool should start + // indexing + StartKey string `protobuf:"bytes,6,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // upload_interval is the interval the pool should validate + // bundles with + UploadInterval uint64 `protobuf:"varint,7,opt,name=upload_interval,json=uploadInterval,proto3" json:"upload_interval,omitempty"` + // operating_cost is the fixed cost which gets paid out + // to every successful uploader + OperatingCost uint64 `protobuf:"varint,8,opt,name=operating_cost,json=operatingCost,proto3" json:"operating_cost,omitempty"` + // min_delegation is the minimum amount of $KYVE the pool has + // to have in order to produce bundles + MinDelegation uint64 `protobuf:"varint,9,opt,name=min_delegation,json=minDelegation,proto3" json:"min_delegation,omitempty"` + // max_bundle_size is the max size a data bundle can have + // (amount of data items) + MaxBundleSize uint64 `protobuf:"varint,10,opt,name=max_bundle_size,json=maxBundleSize,proto3" json:"max_bundle_size,omitempty"` + // version is the current version of the protocol nodes + Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"` + // binaries points to the current binaries of the protocol node + Binaries string `protobuf:"bytes,12,opt,name=binaries,proto3" json:"binaries,omitempty"` + // storage_provider_id is the unique id of the storage provider + // the pool is archiving the data on + StorageProviderId uint32 `protobuf:"varint,13,opt,name=storage_provider_id,json=storageProviderId,proto3" json:"storage_provider_id,omitempty"` + // compression_id is the unique id of the compression type the bundles + // get compressed with + CompressionId uint32 `protobuf:"varint,14,opt,name=compression_id,json=compressionId,proto3" json:"compression_id,omitempty"` +} + +func (m *EventCreatePool) Reset() { *m = EventCreatePool{} } +func (m *EventCreatePool) String() string { return proto.CompactTextString(m) } +func (*EventCreatePool) ProtoMessage() {} +func (*EventCreatePool) Descriptor() ([]byte, []int) { + return fileDescriptor_c1828a100d789238, []int{0} +} +func (m 
*EventCreatePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventCreatePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventCreatePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventCreatePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventCreatePool.Merge(m, src) +} +func (m *EventCreatePool) XXX_Size() int { + return m.Size() +} +func (m *EventCreatePool) XXX_DiscardUnknown() { + xxx_messageInfo_EventCreatePool.DiscardUnknown(m) +} + +var xxx_messageInfo_EventCreatePool proto.InternalMessageInfo + +func (m *EventCreatePool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventCreatePool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EventCreatePool) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *EventCreatePool) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +func (m *EventCreatePool) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +func (m *EventCreatePool) GetStartKey() string { + if m != nil { + return m.StartKey + } + return "" +} + +func (m *EventCreatePool) GetUploadInterval() uint64 { + if m != nil { + return m.UploadInterval + } + return 0 +} + +func (m *EventCreatePool) GetOperatingCost() uint64 { + if m != nil { + return m.OperatingCost + } + return 0 +} + +func (m *EventCreatePool) GetMinDelegation() uint64 { + if m != nil { + return m.MinDelegation + } + return 0 +} + +func (m *EventCreatePool) GetMaxBundleSize() uint64 { + if m != nil { + return m.MaxBundleSize + } + return 0 +} + +func (m *EventCreatePool) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *EventCreatePool) GetBinaries() string { + if m != nil { + return m.Binaries + } + return "" +} + +func (m *EventCreatePool) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +func (m *EventCreatePool) GetCompressionId() uint32 { + if m != nil { + return m.CompressionId + } + return 0 +} + +// EventFundPool is an event emitted when a pool is funded. +// emitted_by: MsgFundPool +type EventFundPool struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // address is the account address of the pool funder. 
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // amount is the amount in ukyve the funder has funded + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventFundPool) Reset() { *m = EventFundPool{} } +func (m *EventFundPool) String() string { return proto.CompactTextString(m) } +func (*EventFundPool) ProtoMessage() {} +func (*EventFundPool) Descriptor() ([]byte, []int) { + return fileDescriptor_c1828a100d789238, []int{1} +} +func (m *EventFundPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventFundPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventFundPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventFundPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventFundPool.Merge(m, src) +} +func (m *EventFundPool) XXX_Size() int { + return m.Size() +} +func (m *EventFundPool) XXX_DiscardUnknown() { + xxx_messageInfo_EventFundPool.DiscardUnknown(m) +} + +var xxx_messageInfo_EventFundPool proto.InternalMessageInfo + +func (m *EventFundPool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventFundPool) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventFundPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventDefundPool is an event emitted when a pool is defunded. +// emitted_by: MsgDefundPool +type EventDefundPool struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // address is the account address of the pool funder. + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // amount is the amount in ukyve the funder has defunded + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventDefundPool) Reset() { *m = EventDefundPool{} } +func (m *EventDefundPool) String() string { return proto.CompactTextString(m) } +func (*EventDefundPool) ProtoMessage() {} +func (*EventDefundPool) Descriptor() ([]byte, []int) { + return fileDescriptor_c1828a100d789238, []int{2} +} +func (m *EventDefundPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDefundPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDefundPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDefundPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDefundPool.Merge(m, src) +} +func (m *EventDefundPool) XXX_Size() int { + return m.Size() +} +func (m *EventDefundPool) XXX_DiscardUnknown() { + xxx_messageInfo_EventDefundPool.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDefundPool proto.InternalMessageInfo + +func (m *EventDefundPool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventDefundPool) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventDefundPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventDefundPool is an event emitted when a pool is defunded. 
+// emitted_by: MsgSubmitBundleProposal +type EventPoolFundsSlashed struct { + // pool_id is the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // address is the account address of the pool funder. + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // amount is the amount in ukyve the validator has lost due to the slash + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventPoolFundsSlashed) Reset() { *m = EventPoolFundsSlashed{} } +func (m *EventPoolFundsSlashed) String() string { return proto.CompactTextString(m) } +func (*EventPoolFundsSlashed) ProtoMessage() {} +func (*EventPoolFundsSlashed) Descriptor() ([]byte, []int) { + return fileDescriptor_c1828a100d789238, []int{3} +} +func (m *EventPoolFundsSlashed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventPoolFundsSlashed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventPoolFundsSlashed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventPoolFundsSlashed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventPoolFundsSlashed.Merge(m, src) +} +func (m *EventPoolFundsSlashed) XXX_Size() int { + return m.Size() +} +func (m *EventPoolFundsSlashed) XXX_DiscardUnknown() { + xxx_messageInfo_EventPoolFundsSlashed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventPoolFundsSlashed proto.InternalMessageInfo + +func (m *EventPoolFundsSlashed) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventPoolFundsSlashed) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *EventPoolFundsSlashed) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventPoolOutOfFunds is an event emitted when a pool has run out of funds +// emitted_by: MsgSubmitBundleProposal +type EventPoolOutOfFunds struct { + // pool_id is the unique ID of the pool. 
+ PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *EventPoolOutOfFunds) Reset() { *m = EventPoolOutOfFunds{} } +func (m *EventPoolOutOfFunds) String() string { return proto.CompactTextString(m) } +func (*EventPoolOutOfFunds) ProtoMessage() {} +func (*EventPoolOutOfFunds) Descriptor() ([]byte, []int) { + return fileDescriptor_c1828a100d789238, []int{4} +} +func (m *EventPoolOutOfFunds) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventPoolOutOfFunds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventPoolOutOfFunds.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventPoolOutOfFunds) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventPoolOutOfFunds.Merge(m, src) +} +func (m *EventPoolOutOfFunds) XXX_Size() int { + return m.Size() +} +func (m *EventPoolOutOfFunds) XXX_DiscardUnknown() { + xxx_messageInfo_EventPoolOutOfFunds.DiscardUnknown(m) +} + +var xxx_messageInfo_EventPoolOutOfFunds proto.InternalMessageInfo + +func (m *EventPoolOutOfFunds) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func init() { + proto.RegisterType((*EventCreatePool)(nil), "kyve.pool.v1beta1.EventCreatePool") + proto.RegisterType((*EventFundPool)(nil), "kyve.pool.v1beta1.EventFundPool") + proto.RegisterType((*EventDefundPool)(nil), "kyve.pool.v1beta1.EventDefundPool") + proto.RegisterType((*EventPoolFundsSlashed)(nil), "kyve.pool.v1beta1.EventPoolFundsSlashed") + proto.RegisterType((*EventPoolOutOfFunds)(nil), "kyve.pool.v1beta1.EventPoolOutOfFunds") +} + +func init() { proto.RegisterFile("kyve/pool/v1beta1/events.proto", fileDescriptor_c1828a100d789238) } + +var fileDescriptor_c1828a100d789238 = []byte{ + // 507 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x53, 0x4f, 0x6b, 0xdb, 0x3e, + 0x18, 0x8e, 0xd3, 0xfc, 0xf2, 0x47, 0xbf, 0x25, 0xa1, 0x2a, 0xdb, 0xc4, 0x06, 0x26, 0x04, 0xb6, + 0x65, 0x17, 0x9b, 0xb2, 0x6f, 0xd0, 0xb4, 0x83, 0x50, 0x58, 0x4b, 0x0a, 0x83, 0x95, 0x81, 0x91, + 0xa3, 0x37, 0x8e, 0x88, 0x2d, 0x19, 0x49, 0xf6, 0x92, 0x7e, 0x8a, 0x7d, 0xa8, 0x1d, 0x76, 0xec, + 0x71, 0xc7, 0x91, 0x7c, 0x91, 0x21, 0xd9, 0x09, 0xbb, 0xec, 0xb6, 0xdd, 0xf4, 0xfc, 0xc9, 0xf3, + 0xbe, 0xd1, 0x63, 0x21, 0x7f, 0xbd, 0x2d, 0x21, 0xcc, 0xa5, 0x4c, 0xc3, 0xf2, 0x3c, 0x06, 0x43, + 0xcf, 0x43, 0x28, 0x41, 0x18, 0x1d, 0xe4, 0x4a, 0x1a, 0x89, 0x4f, 0xad, 0x1e, 0x58, 0x3d, 0xa8, + 0xf5, 0xf1, 0xb7, 0x13, 0x34, 0xbc, 0xb2, 0x9e, 0xa9, 0x02, 0x6a, 0xe0, 0x56, 0xca, 0x14, 0x0f, + 0x50, 0x93, 0x33, 0xe2, 0x8d, 0xbc, 0x49, 0x6b, 0xde, 0xe4, 0x0c, 0x63, 0xd4, 0x12, 0x34, 0x03, + 0xd2, 0x1c, 0x79, 0x93, 0xde, 0xdc, 0x9d, 0x31, 0x41, 0x1d, 0x55, 0x08, 0xc3, 0x33, 0x20, 0x27, + 0x8e, 0x3e, 0x40, 0xeb, 0x4e, 0x65, 0x22, 0x49, 0xab, 0x72, 0xdb, 0x33, 0x7e, 0x86, 0xda, 0x0b, + 0x29, 0x96, 0x3c, 0x21, 0xff, 0x39, 0xb6, 0x46, 0xf8, 0x25, 0xea, 0x69, 0x43, 0x95, 0x89, 0xd6, + 0xb0, 0x25, 0x6d, 0x27, 0x75, 0x1d, 0x71, 0x0d, 0x5b, 0xfc, 0x06, 0x0d, 0x8b, 0x3c, 0x95, 0x94, + 0x45, 0x5c, 0x18, 0x50, 0x25, 0x4d, 0x49, 0xc7, 0xed, 0x34, 0xa8, 0xe8, 0x59, 0xcd, 0xe2, 0x57, + 0x68, 0x20, 0x73, 0x50, 0xd4, 0x70, 0x91, 0x44, 0x0b, 0xa9, 0x0d, 0xe9, 0x3a, 0x5f, 0xff, 0xc8, + 0x4e, 0xa5, 0x36, 0xd6, 0x96, 0x71, 0x11, 0x31, 0x48, 0x21, 0xa1, 0x86, 0x4b, 0x41, 0x7a, 0x95, + 0x2d, 0xe3, 0xe2, 0xf2, 
0x48, 0xe2, 0xd7, 0x68, 0x98, 0xd1, 0x4d, 0x14, 0x17, 0x82, 0xa5, 0x10, + 0x69, 0xfe, 0x00, 0x04, 0xd5, 0x3e, 0xba, 0xb9, 0x70, 0xec, 0x1d, 0x7f, 0x70, 0x37, 0x50, 0x82, + 0xd2, 0x36, 0xe7, 0xff, 0xea, 0x06, 0x6a, 0x88, 0x5f, 0xa0, 0x6e, 0xcc, 0x05, 0x55, 0x1c, 0x34, + 0x79, 0x52, 0xfd, 0xa9, 0x03, 0xc6, 0x01, 0x3a, 0xd3, 0x46, 0x2a, 0x9a, 0x40, 0x94, 0x2b, 0x59, + 0x72, 0x06, 0x2a, 0xe2, 0x8c, 0xf4, 0x47, 0xde, 0xa4, 0x3f, 0x3f, 0xad, 0xa5, 0xdb, 0x5a, 0x99, + 0x31, 0xbb, 0xf4, 0x42, 0x66, 0xb9, 0x02, 0x6d, 0xa3, 0xad, 0x75, 0xe0, 0xac, 0xfd, 0xdf, 0xd8, + 0x19, 0x1b, 0xdf, 0xa3, 0xbe, 0x6b, 0xf1, 0x7d, 0x21, 0x98, 0xeb, 0xf0, 0x39, 0xea, 0xd8, 0x9e, + 0xa3, 0x63, 0x91, 0x6d, 0x0b, 0x67, 0xcc, 0xae, 0x4d, 0x19, 0xb3, 0xbf, 0xac, 0xfb, 0x3c, 0x40, + 0x5b, 0x12, 0xcd, 0x64, 0x21, 0x8c, 0x6b, 0xb4, 0x35, 0xaf, 0xd1, 0xf8, 0x73, 0xfd, 0x85, 0x5c, + 0xc2, 0xf2, 0x1f, 0xa4, 0xc7, 0xe8, 0xa9, 0x4b, 0xb7, 0xb9, 0x76, 0x7b, 0x7d, 0x97, 0x52, 0xbd, + 0x02, 0xf6, 0x37, 0x67, 0x04, 0xe8, 0xec, 0x38, 0xe3, 0xa6, 0x30, 0x37, 0x4b, 0x37, 0xe8, 0x8f, + 0x13, 0x2e, 0xa6, 0xdf, 0x77, 0xbe, 0xf7, 0xb8, 0xf3, 0xbd, 0x9f, 0x3b, 0xdf, 0xfb, 0xba, 0xf7, + 0x1b, 0x8f, 0x7b, 0xbf, 0xf1, 0x63, 0xef, 0x37, 0xee, 0xdf, 0x26, 0xdc, 0xac, 0x8a, 0x38, 0x58, + 0xc8, 0x2c, 0xbc, 0xfe, 0xf4, 0xf1, 0xea, 0x03, 0x98, 0x2f, 0x52, 0xad, 0xc3, 0xc5, 0x8a, 0x72, + 0x11, 0x6e, 0xaa, 0xb7, 0x67, 0xb6, 0x39, 0xe8, 0xb8, 0xed, 0xde, 0xdc, 0xbb, 0x5f, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xc3, 0xb7, 0xf1, 0x50, 0x95, 0x03, 0x00, 0x00, +} + +func (m *EventCreatePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventCreatePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventCreatePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.CompressionId)) + i-- + dAtA[i] = 0x70 + } + if m.StorageProviderId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x68 + } + if len(m.Binaries) > 0 { + i -= len(m.Binaries) + copy(dAtA[i:], m.Binaries) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Binaries))) + i-- + dAtA[i] = 0x62 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x5a + } + if m.MaxBundleSize != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.MaxBundleSize)) + i-- + dAtA[i] = 0x50 + } + if m.MinDelegation != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.MinDelegation)) + i-- + dAtA[i] = 0x48 + } + if m.OperatingCost != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.OperatingCost)) + i-- + dAtA[i] = 0x40 + } + if m.UploadInterval != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.UploadInterval)) + i-- + dAtA[i] = 0x38 + } + if len(m.StartKey) > 0 { + i -= len(m.StartKey) + copy(dAtA[i:], m.StartKey) + i = encodeVarintEvents(dAtA, i, uint64(len(m.StartKey))) + i-- + dAtA[i] = 0x32 + } + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x2a + } + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], 
m.Runtime) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventFundPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventFundPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventFundPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventDefundPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDefundPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDefundPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventPoolFundsSlashed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventPoolFundsSlashed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventPoolFundsSlashed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventPoolOutOfFunds) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventPoolOutOfFunds) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventPoolOutOfFunds) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, 
uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventCreatePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.UploadInterval != 0 { + n += 1 + sovEvents(uint64(m.UploadInterval)) + } + if m.OperatingCost != 0 { + n += 1 + sovEvents(uint64(m.OperatingCost)) + } + if m.MinDelegation != 0 { + n += 1 + sovEvents(uint64(m.MinDelegation)) + } + if m.MaxBundleSize != 0 { + n += 1 + sovEvents(uint64(m.MaxBundleSize)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Binaries) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.StorageProviderId != 0 { + n += 1 + sovEvents(uint64(m.StorageProviderId)) + } + if m.CompressionId != 0 { + n += 1 + sovEvents(uint64(m.CompressionId)) + } + return n +} + +func (m *EventFundPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventDefundPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventPoolFundsSlashed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventPoolOutOfFunds) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventCreatePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventCreatePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventCreatePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", 
wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadInterval", wireType) + } + m.UploadInterval = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadInterval |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingCost", wireType) + } + m.OperatingCost = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OperatingCost |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDelegation", wireType) + } + m.MinDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBundleSize", wireType) + } + m.MaxBundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binaries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Binaries = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CompressionId", wireType) + } + m.CompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventFundPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventFundPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventFundPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventDefundPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: EventDefundPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDefundPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventPoolFundsSlashed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventPoolFundsSlashed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventPoolFundsSlashed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventPoolOutOfFunds) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventPoolOutOfFunds: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventPoolOutOfFunds: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/pool/types/expected_keepers.go b/x/pool/types/expected_keepers.go new file mode 100644 index 00000000..3c5ee050 --- /dev/null +++ b/x/pool/types/expected_keepers.go @@ -0,0 +1,17 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +type AccountKeeper interface{} + +type UpgradeKeeper interface { + ScheduleUpgrade(ctx sdk.Context, plan upgradetypes.Plan) error +} + +type StakersKeeper interface { + LeavePool(ctx sdk.Context, staker string, poolId uint64) + GetAllStakerAddressesOfPool(ctx sdk.Context, poolId uint64) (stakers []string) +} diff --git a/x/pool/types/genesis.go b/x/pool/types/genesis.go new file mode 100644 index 00000000..d30ce883 --- /dev/null +++ b/x/pool/types/genesis.go @@ -0,0 +1,30 @@ +package types + +import "fmt" + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{} +} + +// Validate performs basic genesis state validation returning an error upon any failure. +func (gs GenesisState) Validate() error { + // Check for duplicated index in DelegationEntries + poolIndexMap := make(map[string]struct{}) + + for _, elem := range gs.PoolList { + index := string(PoolKeyPrefix(elem.Id)) + if _, ok := poolIndexMap[index]; ok { + return fmt.Errorf("duplicated pool id %v", elem) + } + poolIndexMap[index] = struct{}{} + if elem.Id >= gs.PoolCount { + return fmt.Errorf("pool id higher than pool count %v", elem) + } + if len(elem.Funders) > MaxFunders { + return fmt.Errorf("more funders than allowed %v", elem) + } + } + + return nil +} diff --git a/x/pool/types/genesis.pb.go b/x/pool/types/genesis.pb.go new file mode 100644 index 00000000..74c70044 --- /dev/null +++ b/x/pool/types/genesis.pb.go @@ -0,0 +1,370 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/pool/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the pool module's genesis state. +type GenesisState struct { + // pool_list ... + PoolList []Pool `protobuf:"bytes,2,rep,name=pool_list,json=poolList,proto3" json:"pool_list"` + // pool_count ... 
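// GenesisState.Validate in x/pool/types/genesis.go above rejects duplicated
// pool ids, ids at or above pool_count, and pools with more than MaxFunders
// funders. A minimal sketch of that behaviour, assuming it sits in the same
// types package with fmt imported; the ids and counts below are made up for
// the example:

func genesisValidateSketch() {
	// Two pools sharing id 0: Validate reports the duplicate.
	gs := GenesisState{
		PoolList:  []Pool{{Id: 0}, {Id: 0}},
		PoolCount: 2,
	}
	fmt.Println(gs.Validate() != nil) // true

	// Unique ids below pool_count and funder lists within MaxFunders pass.
	gs = GenesisState{
		PoolList:  []Pool{{Id: 0}, {Id: 1}},
		PoolCount: 2,
	}
	fmt.Println(gs.Validate()) // <nil>
}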
+ PoolCount uint64 `protobuf:"varint,3,opt,name=pool_count,json=poolCount,proto3" json:"pool_count,omitempty"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_ba827ab14a3de899, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetPoolList() []Pool { + if m != nil { + return m.PoolList + } + return nil +} + +func (m *GenesisState) GetPoolCount() uint64 { + if m != nil { + return m.PoolCount + } + return 0 +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.pool.v1beta1.GenesisState") +} + +func init() { proto.RegisterFile("kyve/pool/v1beta1/genesis.proto", fileDescriptor_ba827ab14a3de899) } + +var fileDescriptor_ba827ab14a3de899 = []byte{ + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0xae, 0x2c, 0x4b, + 0xd5, 0x2f, 0xc8, 0xcf, 0xcf, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x4f, 0x4f, + 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x04, 0x29, 0xd0, + 0x03, 0x29, 0xd0, 0x83, 0x2a, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xea, 0x83, 0x58, + 0x10, 0x85, 0x52, 0x32, 0x98, 0x26, 0x81, 0x75, 0x81, 0x65, 0x95, 0xca, 0xb9, 0x78, 0xdc, 0x21, + 0xe6, 0x06, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x59, 0x71, 0x71, 0x82, 0x64, 0xe3, 0x73, 0x32, 0x8b, + 0x4b, 0x24, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0xc4, 0xf5, 0x30, 0xac, 0xd2, 0x0b, 0xc8, 0xcf, + 0xcf, 0x71, 0x62, 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x88, 0x03, 0x24, 0xe1, 0x93, 0x59, 0x5c, 0x22, + 0x24, 0xcb, 0xc5, 0x05, 0xd6, 0x9b, 0x9c, 0x5f, 0x9a, 0x57, 0x22, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, + 0x12, 0x04, 0x36, 0xcd, 0x19, 0x24, 0xe0, 0xc5, 0xc2, 0xc1, 0x28, 0xc0, 0x14, 0xc4, 0x56, 0x90, + 0x58, 0x94, 0x98, 0x5b, 0xec, 0xe4, 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, + 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, + 0x51, 0x9a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xde, 0x91, 0x61, + 0xae, 0x7e, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, 0xfa, 0xc9, 0x19, 0x89, 0x99, 0x79, 0xfa, 0x15, + 0x10, 0xaf, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0x3d, 0x61, 0x0c, 0x08, 0x00, 0x00, + 0xff, 0xff, 0x22, 0xa0, 0x4e, 0x17, 0x2e, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolCount != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.PoolCount)) + i-- + dAtA[i] = 0x18 + } + if len(m.PoolList) > 0 { + for iNdEx := len(m.PoolList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PoolList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PoolList) > 0 { + for _, e := range m.PoolList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.PoolCount != 0 { + n += 1 + sovGenesis(uint64(m.PoolCount)) + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PoolList = append(m.PoolList, Pool{}) + if err := m.PoolList[len(m.PoolList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolCount", wireType) + } + m.PoolCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/pool/types/keys.go b/x/pool/types/keys.go new file mode 100644 index 00000000..39013aa0 --- /dev/null +++ b/x/pool/types/keys.go @@ -0,0 +1,32 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +const ( + // ModuleName defines the module name + ModuleName = "pool" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_pool" +) + +const ( + MaxFunders = 50 // maximum amount of funders which are allowed +) + +var ( + PoolKey = []byte{1} + PoolCountKey = []byte{2} +) + +func PoolKeyPrefix(poolId uint64) []byte { + return util.GetByteKey(poolId) +} diff --git a/x/pool/types/message_defund_pool.go b/x/pool/types/message_defund_pool.go new file mode 100644 index 00000000..d75a3167 --- /dev/null +++ b/x/pool/types/message_defund_pool.go @@ -0,0 +1,47 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgDefundPool = "defund_pool" + +var _ sdk.Msg = &MsgDefundPool{} + +func NewMsgDefundPool(creator string, id uint64, amount uint64) *MsgDefundPool { + return &MsgDefundPool{ + Creator: creator, + Id: id, + Amount: amount, + } +} + +func (msg *MsgDefundPool) Route() string { + return RouterKey +} + +func (msg *MsgDefundPool) Type() string { + return TypeMsgDefundPool +} + +func (msg *MsgDefundPool) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgDefundPool) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgDefundPool) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/pool/types/message_fund_pool.go b/x/pool/types/message_fund_pool.go new file mode 
100644 index 00000000..f8f9e0b8 --- /dev/null +++ b/x/pool/types/message_fund_pool.go @@ -0,0 +1,47 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgFundPool = "fund_pool" + +var _ sdk.Msg = &MsgFundPool{} + +func NewMsgFundPool(creator string, id uint64, amount uint64) *MsgFundPool { + return &MsgFundPool{ + Creator: creator, + Id: id, + Amount: amount, + } +} + +func (msg *MsgFundPool) Route() string { + return RouterKey +} + +func (msg *MsgFundPool) Type() string { + return TypeMsgFundPool +} + +func (msg *MsgFundPool) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgFundPool) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgFundPool) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/pool/types/msg.go b/x/pool/types/msg.go new file mode 100644 index 00000000..8bc391f0 --- /dev/null +++ b/x/pool/types/msg.go @@ -0,0 +1,105 @@ +package types + +import ( + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + _ sdk.Msg = &MsgCreatePool{} + _ sdk.Msg = &MsgUpdatePool{} + _ sdk.Msg = &MsgDisablePool{} + _ sdk.Msg = &MsgEnablePool{} + _ sdk.Msg = &MsgScheduleRuntimeUpgrade{} + _ sdk.Msg = &MsgCancelRuntimeUpgrade{} +) + +// GetSigners returns the expected signers for a MsgCreatePool message. +func (msg *MsgCreatePool) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgCreatePool) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} + +// GetSigners returns the expected signers for a MsgUpdatePool message. +func (msg *MsgUpdatePool) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgUpdatePool) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} + +// GetSigners returns the expected signers for a MsgDisablePool message. +func (msg *MsgDisablePool) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgDisablePool) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} + +// GetSigners returns the expected signers for a MsgEnablePool message. +func (msg *MsgEnablePool) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
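// For MsgFundPool and MsgDefundPool above, ValidateBasic is purely stateless:
// it only checks that the creator is a well-formed bech32 account address and
// does not inspect the pool id or amount. A minimal sketch, assuming it sits
// in the types package with fmt and the cosmos-sdk types package (sdk)
// imported:

func fundPoolValidateSketch() {
	// A malformed creator address fails before any state is touched.
	bad := NewMsgFundPool("not-a-bech32-address", 0, 100)
	fmt.Println(bad.ValidateBasic() != nil) // true

	// Any syntactically valid bech32 account address passes this check.
	creator := sdk.AccAddress(make([]byte, 20)).String()
	good := NewMsgFundPool(creator, 0, 100)
	fmt.Println(good.ValidateBasic()) // <nil>
}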
+func (msg *MsgEnablePool) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} + +// GetSigners returns the expected signers for a MsgScheduleRuntimeUpgrade message. +func (msg *MsgScheduleRuntimeUpgrade) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgScheduleRuntimeUpgrade) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} + +// GetSigners returns the expected signers for a MsgCancelRuntimeUpgrade message. +func (msg *MsgCancelRuntimeUpgrade) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgCancelRuntimeUpgrade) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + return nil +} diff --git a/x/pool/types/pool.go b/x/pool/types/pool.go new file mode 100644 index 00000000..ee7209f1 --- /dev/null +++ b/x/pool/types/pool.go @@ -0,0 +1,73 @@ +package types + +import "math" + +// Handles the funders of a pool. Functions safely add and remove funds to funders. +// If amount drops to zero the funder is automatically removed from the list. + +// AddAmountToFunder adds the given amount to an existing funder. +// If the funder does not exist, a new funder is inserted. +func (m *Pool) AddAmountToFunder(funderAddress string, amount uint64) { + for _, v := range m.Funders { + if v.Address == funderAddress { + m.TotalFunds += amount + v.Amount += amount + return + } + } + if amount > 0 { + // Funder was not found, insert new funder + m.Funders = append(m.Funders, &Funder{ + Address: funderAddress, + Amount: amount, + }) + m.TotalFunds += amount + } +} + +// SubtractAmountFromFunder subtracts the given amount form an existing funder +// If the amount is grater or equal to the funders amount, the funder is removed. +func (m *Pool) SubtractAmountFromFunder(funderAddress string, amount uint64) { + for i := range m.Funders { + if m.Funders[i].Address == funderAddress { + if m.Funders[i].Amount > amount { + m.TotalFunds -= amount + m.Funders[i].Amount -= amount + } else { + m.TotalFunds -= m.Funders[i].Amount + + // Remove funder + m.Funders[i] = m.Funders[len(m.Funders)-1] + m.Funders = m.Funders[:len(m.Funders)-1] + } + return + } + } +} + +func (m *Pool) RemoveFunder(funderAddress string) { + m.SubtractAmountFromFunder(funderAddress, math.MaxUint64) +} + +func (m *Pool) GetFunderAmount(address string) uint64 { + for _, v := range m.Funders { + if v.Address == address { + return v.Amount + } + } + return 0 +} + +func (m *Pool) GetLowestFunder() Funder { + if len(m.Funders) == 0 { + return Funder{} + } + + lowestFunder := m.Funders[0] + for _, v := range m.Funders { + if v.Amount < lowestFunder.Amount { + lowestFunder = v + } + } + return *lowestFunder +} diff --git a/x/pool/types/pool.pb.go b/x/pool/types/pool.pb.go new file mode 100644 index 00000000..d4bda31d --- /dev/null +++ b/x/pool/types/pool.pb.go @@ -0,0 +1,2087 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
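// The funder helpers in x/pool/types/pool.go above keep Funders and
// TotalFunds consistent: adding to an unknown address inserts a new Funder,
// and subtracting a funder's full amount (or more) removes the entry
// entirely. A minimal sketch of that bookkeeping, assuming it sits in the
// same types package with fmt imported; "alice" and "bob" are placeholder
// strings, not real bech32 addresses:

func funderBookkeepingSketch() {
	p := Pool{}

	p.AddAmountToFunder("alice", 300)
	p.AddAmountToFunder("bob", 100)
	fmt.Println(p.TotalFunds, len(p.Funders)) // 400 2
	fmt.Println(p.GetLowestFunder().Address)  // bob

	// Subtracting more than bob funded drops bob from the list.
	p.SubtractAmountFromFunder("bob", 150)
	fmt.Println(p.TotalFunds, p.GetFunderAmount("bob")) // 300 0

	// RemoveFunder subtracts math.MaxUint64 and therefore always removes.
	p.RemoveFunder("alice")
	fmt.Println(p.TotalFunds, len(p.Funders)) // 0 0
}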
+// source: kyve/pool/v1beta1/pool.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// PoolStatus ... +type PoolStatus int32 + +const ( + // POOL_STATUS_UNSPECIFIED ... + POOL_STATUS_UNSPECIFIED PoolStatus = 0 + // POOL_STATUS_ACTIVE ... + POOL_STATUS_ACTIVE PoolStatus = 1 + // POOL_STATUS_DISABLED ... + POOL_STATUS_DISABLED PoolStatus = 2 + // POOL_STATUS_NO_FUNDS ... + POOL_STATUS_NO_FUNDS PoolStatus = 3 + // POOL_STATUS_NOT_ENOUGH_DELEGATION ... + POOL_STATUS_NOT_ENOUGH_DELEGATION PoolStatus = 4 + // POOL_STATUS_UPGRADING ... + POOL_STATUS_UPGRADING PoolStatus = 5 +) + +var PoolStatus_name = map[int32]string{ + 0: "POOL_STATUS_UNSPECIFIED", + 1: "POOL_STATUS_ACTIVE", + 2: "POOL_STATUS_DISABLED", + 3: "POOL_STATUS_NO_FUNDS", + 4: "POOL_STATUS_NOT_ENOUGH_DELEGATION", + 5: "POOL_STATUS_UPGRADING", +} + +var PoolStatus_value = map[string]int32{ + "POOL_STATUS_UNSPECIFIED": 0, + "POOL_STATUS_ACTIVE": 1, + "POOL_STATUS_DISABLED": 2, + "POOL_STATUS_NO_FUNDS": 3, + "POOL_STATUS_NOT_ENOUGH_DELEGATION": 4, + "POOL_STATUS_UPGRADING": 5, +} + +func (x PoolStatus) String() string { + return proto.EnumName(PoolStatus_name, int32(x)) +} + +func (PoolStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_40c1730f47ff2ef8, []int{0} +} + +// Protocol holds all info about the current pool version and the +// available binaries for participating as a validator in a pool +type Protocol struct { + // version holds the current software version tag of the pool binaries + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // binaries is a stringified json object which holds binaries in the + // current version for multiple platforms and architectures + Binaries string `protobuf:"bytes,2,opt,name=binaries,proto3" json:"binaries,omitempty"` + // last_upgrade is the unix time the pool was upgraded the last time + LastUpgrade uint64 `protobuf:"varint,3,opt,name=last_upgrade,json=lastUpgrade,proto3" json:"last_upgrade,omitempty"` +} + +func (m *Protocol) Reset() { *m = Protocol{} } +func (m *Protocol) String() string { return proto.CompactTextString(m) } +func (*Protocol) ProtoMessage() {} +func (*Protocol) Descriptor() ([]byte, []int) { + return fileDescriptor_40c1730f47ff2ef8, []int{0} +} +func (m *Protocol) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Protocol) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Protocol.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Protocol) XXX_Merge(src proto.Message) { + xxx_messageInfo_Protocol.Merge(m, src) +} +func (m *Protocol) XXX_Size() int { + return m.Size() +} +func (m *Protocol) XXX_DiscardUnknown() { + xxx_messageInfo_Protocol.DiscardUnknown(m) +} + +var xxx_messageInfo_Protocol proto.InternalMessageInfo + +func (m *Protocol) 
GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Protocol) GetBinaries() string { + if m != nil { + return m.Binaries + } + return "" +} + +func (m *Protocol) GetLastUpgrade() uint64 { + if m != nil { + return m.LastUpgrade + } + return 0 +} + +// Upgrade holds all info when a pool has a scheduled upgrade +type UpgradePlan struct { + // version is the new software version tag of the upgrade + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // binaries is the new stringified json object which holds binaries in the + // upgrade version for multiple platforms and architectures + Binaries string `protobuf:"bytes,2,opt,name=binaries,proto3" json:"binaries,omitempty"` + // scheduled_at is the unix time the upgrade is supposed to be done + ScheduledAt uint64 `protobuf:"varint,3,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"` + // duration is the time in seconds how long the pool should halt + // during the upgrade to give all validators a chance of switching + // to the new binaries + Duration uint64 `protobuf:"varint,4,opt,name=duration,proto3" json:"duration,omitempty"` +} + +func (m *UpgradePlan) Reset() { *m = UpgradePlan{} } +func (m *UpgradePlan) String() string { return proto.CompactTextString(m) } +func (*UpgradePlan) ProtoMessage() {} +func (*UpgradePlan) Descriptor() ([]byte, []int) { + return fileDescriptor_40c1730f47ff2ef8, []int{1} +} +func (m *UpgradePlan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpgradePlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpgradePlan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpgradePlan) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradePlan.Merge(m, src) +} +func (m *UpgradePlan) XXX_Size() int { + return m.Size() +} +func (m *UpgradePlan) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradePlan.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradePlan proto.InternalMessageInfo + +func (m *UpgradePlan) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *UpgradePlan) GetBinaries() string { + if m != nil { + return m.Binaries + } + return "" +} + +func (m *UpgradePlan) GetScheduledAt() uint64 { + if m != nil { + return m.ScheduledAt + } + return 0 +} + +func (m *UpgradePlan) GetDuration() uint64 { + if m != nil { + return m.Duration + } + return 0 +} + +// Funder is the object which holds info about a single pool funder +type Funder struct { + // address is the address of the funder + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // amount is the current amount of funds in ukyve the funder has + // still funded the pool with + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *Funder) Reset() { *m = Funder{} } +func (m *Funder) String() string { return proto.CompactTextString(m) } +func (*Funder) ProtoMessage() {} +func (*Funder) Descriptor() ([]byte, []int) { + return fileDescriptor_40c1730f47ff2ef8, []int{2} +} +func (m *Funder) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Funder) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Funder.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) 
+ if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Funder) XXX_Merge(src proto.Message) { + xxx_messageInfo_Funder.Merge(m, src) +} +func (m *Funder) XXX_Size() int { + return m.Size() +} +func (m *Funder) XXX_DiscardUnknown() { + xxx_messageInfo_Funder.DiscardUnknown(m) +} + +var xxx_messageInfo_Funder proto.InternalMessageInfo + +func (m *Funder) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Funder) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// Pool ... +type Pool struct { + // id - unique identifier of the pool, can not be changed + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // name is a human readable name for the pool + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // runtime specified which protocol and which version needs is required + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + // logo is a link to an image file + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` + // config is either a JSON encoded string or a link to an external storage provider. + // This is up to the implementation of the protocol node. + Config string `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` + // start_key ... + StartKey string `protobuf:"bytes,6,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // current_key ... + CurrentKey string `protobuf:"bytes,7,opt,name=current_key,json=currentKey,proto3" json:"current_key,omitempty"` + // current_summary ... + CurrentSummary string `protobuf:"bytes,8,opt,name=current_summary,json=currentSummary,proto3" json:"current_summary,omitempty"` + // current_index ... + CurrentIndex uint64 `protobuf:"varint,9,opt,name=current_index,json=currentIndex,proto3" json:"current_index,omitempty"` + // total_bundles is the number of total finalized bundles + TotalBundles uint64 `protobuf:"varint,10,opt,name=total_bundles,json=totalBundles,proto3" json:"total_bundles,omitempty"` + // upload_interval ... + UploadInterval uint64 `protobuf:"varint,11,opt,name=upload_interval,json=uploadInterval,proto3" json:"upload_interval,omitempty"` + // operating_cost ... + OperatingCost uint64 `protobuf:"varint,12,opt,name=operating_cost,json=operatingCost,proto3" json:"operating_cost,omitempty"` + // min_delegation ... + MinDelegation uint64 `protobuf:"varint,13,opt,name=min_delegation,json=minDelegation,proto3" json:"min_delegation,omitempty"` + // max_bundle_size ... + MaxBundleSize uint64 `protobuf:"varint,14,opt,name=max_bundle_size,json=maxBundleSize,proto3" json:"max_bundle_size,omitempty"` + // disabled is true when the pool is disabled. + // Can only be done via governance. + Disabled bool `protobuf:"varint,15,opt,name=disabled,proto3" json:"disabled,omitempty"` + // funders ... + Funders []*Funder `protobuf:"bytes,16,rep,name=funders,proto3" json:"funders,omitempty"` + // total_funds ... + TotalFunds uint64 `protobuf:"varint,17,opt,name=total_funds,json=totalFunds,proto3" json:"total_funds,omitempty"` + // protocol ... + Protocol *Protocol `protobuf:"bytes,18,opt,name=protocol,proto3" json:"protocol,omitempty"` + // upgrade_plan ... + UpgradePlan *UpgradePlan `protobuf:"bytes,19,opt,name=upgrade_plan,json=upgradePlan,proto3" json:"upgrade_plan,omitempty"` + // storage_provider_id ... 
+ CurrentStorageProviderId uint32 `protobuf:"varint,20,opt,name=current_storage_provider_id,json=currentStorageProviderId,proto3" json:"current_storage_provider_id,omitempty"` + // compression_id ... + CurrentCompressionId uint32 `protobuf:"varint,21,opt,name=current_compression_id,json=currentCompressionId,proto3" json:"current_compression_id,omitempty"` +} + +func (m *Pool) Reset() { *m = Pool{} } +func (m *Pool) String() string { return proto.CompactTextString(m) } +func (*Pool) ProtoMessage() {} +func (*Pool) Descriptor() ([]byte, []int) { + return fileDescriptor_40c1730f47ff2ef8, []int{3} +} +func (m *Pool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Pool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Pool) XXX_Merge(src proto.Message) { + xxx_messageInfo_Pool.Merge(m, src) +} +func (m *Pool) XXX_Size() int { + return m.Size() +} +func (m *Pool) XXX_DiscardUnknown() { + xxx_messageInfo_Pool.DiscardUnknown(m) +} + +var xxx_messageInfo_Pool proto.InternalMessageInfo + +func (m *Pool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Pool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Pool) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *Pool) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +func (m *Pool) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +func (m *Pool) GetStartKey() string { + if m != nil { + return m.StartKey + } + return "" +} + +func (m *Pool) GetCurrentKey() string { + if m != nil { + return m.CurrentKey + } + return "" +} + +func (m *Pool) GetCurrentSummary() string { + if m != nil { + return m.CurrentSummary + } + return "" +} + +func (m *Pool) GetCurrentIndex() uint64 { + if m != nil { + return m.CurrentIndex + } + return 0 +} + +func (m *Pool) GetTotalBundles() uint64 { + if m != nil { + return m.TotalBundles + } + return 0 +} + +func (m *Pool) GetUploadInterval() uint64 { + if m != nil { + return m.UploadInterval + } + return 0 +} + +func (m *Pool) GetOperatingCost() uint64 { + if m != nil { + return m.OperatingCost + } + return 0 +} + +func (m *Pool) GetMinDelegation() uint64 { + if m != nil { + return m.MinDelegation + } + return 0 +} + +func (m *Pool) GetMaxBundleSize() uint64 { + if m != nil { + return m.MaxBundleSize + } + return 0 +} + +func (m *Pool) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *Pool) GetFunders() []*Funder { + if m != nil { + return m.Funders + } + return nil +} + +func (m *Pool) GetTotalFunds() uint64 { + if m != nil { + return m.TotalFunds + } + return 0 +} + +func (m *Pool) GetProtocol() *Protocol { + if m != nil { + return m.Protocol + } + return nil +} + +func (m *Pool) GetUpgradePlan() *UpgradePlan { + if m != nil { + return m.UpgradePlan + } + return nil +} + +func (m *Pool) GetCurrentStorageProviderId() uint32 { + if m != nil { + return m.CurrentStorageProviderId + } + return 0 +} + +func (m *Pool) GetCurrentCompressionId() uint32 { + if m != nil { + return m.CurrentCompressionId + } + return 0 +} + +func init() { + proto.RegisterEnum("kyve.pool.v1beta1.PoolStatus", PoolStatus_name, PoolStatus_value) + proto.RegisterType((*Protocol)(nil), "kyve.pool.v1beta1.Protocol") + 
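// Taken together, the UpgradePlan field comments above describe a halt window
// that starts at scheduled_at (unix time) and lasts duration seconds, during
// which validators switch to the new binaries. A minimal sketch of one way to
// read that window off a Pool, assuming the same types package; the actual
// upgrade handling lives in the pool keeper and may differ in detail:

// inUpgradeWindow reports whether unix time t falls inside the pool's
// scheduled upgrade halt window, if any plan is set.
func inUpgradeWindow(p Pool, t int64) bool {
	plan := p.UpgradePlan
	if plan == nil || plan.ScheduledAt == 0 {
		return false // no upgrade scheduled
	}
	start := int64(plan.ScheduledAt)
	end := start + int64(plan.Duration)
	return t >= start && t < end
}

// Example: a plan with ScheduledAt = 1_700_000_000 and Duration = 1800
// covers unix times in [1_700_000_000, 1_700_001_800).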
proto.RegisterType((*UpgradePlan)(nil), "kyve.pool.v1beta1.UpgradePlan") + proto.RegisterType((*Funder)(nil), "kyve.pool.v1beta1.Funder") + proto.RegisterType((*Pool)(nil), "kyve.pool.v1beta1.Pool") +} + +func init() { proto.RegisterFile("kyve/pool/v1beta1/pool.proto", fileDescriptor_40c1730f47ff2ef8) } + +var fileDescriptor_40c1730f47ff2ef8 = []byte{ + // 818 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x41, 0x6f, 0xdb, 0x36, + 0x14, 0xc7, 0xad, 0xc4, 0x75, 0x6c, 0x3a, 0x71, 0x5c, 0x2e, 0xcd, 0xd8, 0x64, 0xf0, 0xdc, 0x0c, + 0xdd, 0xbc, 0x1d, 0x6c, 0xb4, 0x1d, 0x30, 0x60, 0xc0, 0x0e, 0x8e, 0xed, 0x64, 0x42, 0x03, 0xdb, + 0x90, 0xed, 0x02, 0xdb, 0x45, 0xa0, 0x45, 0x56, 0x21, 0x22, 0x91, 0x02, 0x49, 0x79, 0x71, 0x8f, + 0x3b, 0xed, 0xb8, 0xef, 0xd0, 0xcf, 0xb1, 0xfb, 0x8e, 0x3d, 0xee, 0x38, 0x24, 0x5f, 0x64, 0x20, + 0x25, 0x79, 0xee, 0xd6, 0x53, 0x6f, 0x7c, 0xbf, 0xff, 0xff, 0xe9, 0x3d, 0xf3, 0xf1, 0x19, 0x7c, + 0x76, 0xb3, 0x5e, 0xd1, 0x5e, 0x22, 0x44, 0xd4, 0x5b, 0x3d, 0x5b, 0x52, 0x8d, 0x9f, 0xd9, 0xa0, + 0x9b, 0x48, 0xa1, 0x05, 0x7c, 0x68, 0xd4, 0xae, 0x05, 0xb9, 0x7a, 0x72, 0x14, 0x8a, 0x50, 0x58, + 0xb5, 0x67, 0x4e, 0x99, 0xf1, 0x2c, 0x00, 0xd5, 0xa9, 0x39, 0x04, 0x22, 0x82, 0x08, 0xec, 0xad, + 0xa8, 0x54, 0x4c, 0x70, 0xe4, 0xb4, 0x9d, 0x4e, 0xcd, 0x2b, 0x42, 0x78, 0x02, 0xaa, 0x4b, 0xc6, + 0xb1, 0x64, 0x54, 0xa1, 0x1d, 0x2b, 0x6d, 0x62, 0xf8, 0x04, 0xec, 0x47, 0x58, 0x69, 0x3f, 0x4d, + 0x42, 0x89, 0x09, 0x45, 0xbb, 0x6d, 0xa7, 0x53, 0xf6, 0xea, 0x86, 0x2d, 0x32, 0x74, 0xf6, 0xab, + 0x03, 0xea, 0xf9, 0x79, 0x1a, 0x61, 0xfe, 0xf1, 0x85, 0x54, 0x70, 0x4d, 0x49, 0x1a, 0x51, 0xe2, + 0x63, 0x5d, 0x14, 0xda, 0xb0, 0xbe, 0x36, 0xe9, 0x24, 0x95, 0x58, 0x9b, 0x2f, 0x97, 0xad, 0xbc, + 0x89, 0xcf, 0xbe, 0x07, 0x95, 0x8b, 0x94, 0x13, 0x2a, 0x4d, 0x79, 0x4c, 0x88, 0xa4, 0x4a, 0x15, + 0xe5, 0xf3, 0x10, 0x1e, 0x83, 0x0a, 0x8e, 0x45, 0xca, 0xb5, 0x2d, 0x5e, 0xf6, 0xf2, 0xe8, 0xec, + 0x6d, 0x05, 0x94, 0xa7, 0x42, 0x44, 0xb0, 0x01, 0x76, 0x18, 0xb1, 0x59, 0x65, 0x6f, 0x87, 0x11, + 0x08, 0x41, 0x99, 0xe3, 0x98, 0xe6, 0xbd, 0xda, 0xb3, 0xf9, 0xbc, 0x4c, 0xb9, 0x66, 0x71, 0x76, + 0x17, 0x35, 0xaf, 0x08, 0x8d, 0x3b, 0x12, 0xa1, 0xb0, 0xad, 0xd5, 0x3c, 0x7b, 0x36, 0x25, 0x03, + 0xc1, 0x5f, 0xb3, 0x10, 0x3d, 0xb0, 0x34, 0x8f, 0xe0, 0x29, 0xa8, 0x29, 0x8d, 0xa5, 0xf6, 0x6f, + 0xe8, 0x1a, 0x55, 0xb2, 0xab, 0xb0, 0xe0, 0x25, 0x5d, 0xc3, 0xcf, 0x41, 0x3d, 0x48, 0xa5, 0xa4, + 0x3c, 0x93, 0xf7, 0xac, 0x0c, 0x72, 0x64, 0x0c, 0x5f, 0x81, 0xc3, 0xc2, 0xa0, 0xd2, 0x38, 0xc6, + 0x72, 0x8d, 0xaa, 0xd6, 0xd4, 0xc8, 0xf1, 0x2c, 0xa3, 0xf0, 0x0b, 0x70, 0x50, 0x18, 0x19, 0x27, + 0xf4, 0x16, 0xd5, 0xec, 0x6f, 0xdb, 0xcf, 0xa1, 0x6b, 0x98, 0x31, 0x69, 0xa1, 0x71, 0xe4, 0x2f, + 0x53, 0x4e, 0x22, 0xaa, 0x10, 0xc8, 0x4c, 0x16, 0x9e, 0x67, 0xcc, 0x94, 0x4c, 0x93, 0x48, 0x60, + 0xe2, 0x33, 0xae, 0xa9, 0x5c, 0xe1, 0x08, 0xd5, 0xad, 0xad, 0x91, 0x61, 0x37, 0xa7, 0xf0, 0x29, + 0x68, 0x88, 0x84, 0x9a, 0xa9, 0xf0, 0xd0, 0x0f, 0x84, 0xd2, 0x68, 0xdf, 0xfa, 0x0e, 0x36, 0x74, + 0x20, 0x94, 0x36, 0xb6, 0x98, 0x71, 0x9f, 0xd0, 0x88, 0x86, 0xd9, 0x44, 0x0f, 0x32, 0x5b, 0xcc, + 0xf8, 0x70, 0x03, 0xe1, 0x97, 0xe0, 0x30, 0xc6, 0xb7, 0x79, 0x67, 0xbe, 0x62, 0x6f, 0x28, 0x6a, + 0xe4, 0x3e, 0x7c, 0x9b, 0xf5, 0x36, 0x63, 0x6f, 0xa8, 0x7d, 0x1a, 0x4c, 0xe1, 0x65, 0x44, 0x09, + 0x3a, 0x6c, 0x3b, 0x9d, 0xaa, 0xb7, 0x89, 0xe1, 0x0b, 0xb0, 0xf7, 0xda, 0x3e, 0x0d, 0x85, 0x9a, + 0xed, 0xdd, 0x4e, 0xfd, 0xf9, 0xe3, 0xee, 0xff, 0xf6, 0xa7, 0x9b, 0x3d, 0x1e, 0xaf, 0x70, 0x9a, + 0x19, 0x64, 
0x97, 0x62, 0x80, 0x42, 0x0f, 0x6d, 0x51, 0x60, 0x91, 0xb1, 0x2a, 0xf8, 0x1d, 0xa8, + 0x26, 0xf9, 0x6a, 0x21, 0xd8, 0x76, 0x3a, 0xf5, 0xe7, 0xa7, 0x1f, 0xf8, 0x6c, 0xb1, 0x7d, 0xde, + 0xc6, 0x0c, 0xfb, 0x60, 0x3f, 0x5f, 0x26, 0x3f, 0x89, 0x30, 0x47, 0x9f, 0xd8, 0xe4, 0xd6, 0x07, + 0x92, 0xb7, 0x96, 0xca, 0xab, 0xa7, 0x5b, 0x1b, 0xf6, 0x03, 0x38, 0xdd, 0xcc, 0x5f, 0x0b, 0x89, + 0x43, 0xea, 0x27, 0x52, 0xac, 0x18, 0xa1, 0xd2, 0x67, 0x04, 0x1d, 0xb5, 0x9d, 0xce, 0x81, 0x87, + 0x8a, 0xb7, 0x90, 0x39, 0xa6, 0xb9, 0xc1, 0x25, 0xf0, 0x5b, 0x70, 0x5c, 0xa4, 0x07, 0x22, 0x4e, + 0xcc, 0x6e, 0x30, 0xc1, 0x4d, 0xe6, 0x23, 0x9b, 0x79, 0x94, 0xab, 0x83, 0x7f, 0x45, 0x97, 0x7c, + 0xf3, 0x87, 0x03, 0x80, 0xd9, 0x92, 0x99, 0xc6, 0x3a, 0x55, 0xf0, 0x14, 0x7c, 0x3a, 0x9d, 0x4c, + 0xae, 0xfc, 0xd9, 0xbc, 0x3f, 0x5f, 0xcc, 0xfc, 0xc5, 0x78, 0x36, 0x1d, 0x0d, 0xdc, 0x0b, 0x77, + 0x34, 0x6c, 0x96, 0xe0, 0x31, 0x80, 0xdb, 0x62, 0x7f, 0x30, 0x77, 0x5f, 0x8d, 0x9a, 0x0e, 0x44, + 0xe0, 0x68, 0x9b, 0x0f, 0xdd, 0x59, 0xff, 0xfc, 0x6a, 0x34, 0x6c, 0xee, 0xfc, 0x57, 0x19, 0x4f, + 0xfc, 0x8b, 0xc5, 0x78, 0x38, 0x6b, 0xee, 0xc2, 0xa7, 0xe0, 0xc9, 0xfb, 0xca, 0xdc, 0x1f, 0x8d, + 0x27, 0x8b, 0xcb, 0x1f, 0xfd, 0xe1, 0xe8, 0x6a, 0x74, 0xd9, 0x9f, 0xbb, 0x93, 0x71, 0xb3, 0x0c, + 0x1f, 0x83, 0x47, 0xef, 0xf5, 0x33, 0xbd, 0xf4, 0xfa, 0x43, 0x77, 0x7c, 0xd9, 0x7c, 0x70, 0x52, + 0xfe, 0xed, 0x6d, 0xab, 0x74, 0x3e, 0xf8, 0xf3, 0xae, 0xe5, 0xbc, 0xbb, 0x6b, 0x39, 0x7f, 0xdf, + 0xb5, 0x9c, 0xdf, 0xef, 0x5b, 0xa5, 0x77, 0xf7, 0xad, 0xd2, 0x5f, 0xf7, 0xad, 0xd2, 0xcf, 0x5f, + 0x87, 0x4c, 0x5f, 0xa7, 0xcb, 0x6e, 0x20, 0xe2, 0xde, 0xcb, 0x9f, 0x5e, 0x8d, 0xc6, 0x54, 0xff, + 0x22, 0xe4, 0x4d, 0x2f, 0xb8, 0xc6, 0x8c, 0xf7, 0x6e, 0xb3, 0xbf, 0x61, 0xbd, 0x4e, 0xa8, 0x5a, + 0x56, 0xec, 0x18, 0x5f, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0xec, 0x2d, 0x4a, 0x94, 0xa0, 0x05, + 0x00, 0x00, +} + +func (m *Protocol) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Protocol) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Protocol) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastUpgrade != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.LastUpgrade)) + i-- + dAtA[i] = 0x18 + } + if len(m.Binaries) > 0 { + i -= len(m.Binaries) + copy(dAtA[i:], m.Binaries) + i = encodeVarintPool(dAtA, i, uint64(len(m.Binaries))) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintPool(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpgradePlan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpgradePlan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpgradePlan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Duration != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x20 + } + if m.ScheduledAt != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.ScheduledAt)) + i-- + dAtA[i] = 0x18 + } + if len(m.Binaries) > 0 { + i -= len(m.Binaries) + copy(dAtA[i:], m.Binaries) + i = 
encodeVarintPool(dAtA, i, uint64(len(m.Binaries))) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintPool(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Funder) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Funder) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Funder) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintPool(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Pool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentCompressionId != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.CurrentCompressionId)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if m.CurrentStorageProviderId != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.CurrentStorageProviderId)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 + } + if m.UpgradePlan != nil { + { + size, err := m.UpgradePlan.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.Protocol != nil { + { + size, err := m.Protocol.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.TotalFunds != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.TotalFunds)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + } + if len(m.Funders) > 0 { + for iNdEx := len(m.Funders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Funders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPool(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + if m.MaxBundleSize != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.MaxBundleSize)) + i-- + dAtA[i] = 0x70 + } + if m.MinDelegation != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.MinDelegation)) + i-- + dAtA[i] = 0x68 + } + if m.OperatingCost != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.OperatingCost)) + i-- + dAtA[i] = 0x60 + } + if m.UploadInterval != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.UploadInterval)) + i-- + dAtA[i] = 0x58 + } + if m.TotalBundles != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.TotalBundles)) + i-- + dAtA[i] = 0x50 + } + if m.CurrentIndex != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.CurrentIndex)) + i-- + dAtA[i] = 0x48 + } + if len(m.CurrentSummary) > 0 { + i -= len(m.CurrentSummary) + copy(dAtA[i:], m.CurrentSummary) + 
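// The writers above fill dAtA from the end, so each field key is emitted in
// reverse byte order. For field numbers >= 16 the key itself is a two-byte
// varint, which is why current_compression_id (field 21, wire type 0) appears
// as the pair 0x1 then 0xa8: forward on the wire that is 0xA8 0x01, the
// varint encoding of 21<<3|0 = 168, while single-byte keys such as 0x42
// (field 8, wire type 2, current_summary) need no continuation byte. A small
// standalone helper that reproduces the arithmetic, separate from the
// generated code:

// encodeKey returns the protobuf key bytes (forward wire order) for a field
// number and wire type.
func encodeKey(fieldNum, wireType uint64) []byte {
	v := fieldNum<<3 | wireType
	var out []byte
	for v >= 0x80 {
		out = append(out, byte(v&0x7F|0x80))
		v >>= 7
	}
	return append(out, byte(v))
}

// encodeKey(8, 2)  -> [0x42]        // current_summary
// encodeKey(21, 0) -> [0xa8, 0x01]  // current_compression_id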
i = encodeVarintPool(dAtA, i, uint64(len(m.CurrentSummary))) + i-- + dAtA[i] = 0x42 + } + if len(m.CurrentKey) > 0 { + i -= len(m.CurrentKey) + copy(dAtA[i:], m.CurrentKey) + i = encodeVarintPool(dAtA, i, uint64(len(m.CurrentKey))) + i-- + dAtA[i] = 0x3a + } + if len(m.StartKey) > 0 { + i -= len(m.StartKey) + copy(dAtA[i:], m.StartKey) + i = encodeVarintPool(dAtA, i, uint64(len(m.StartKey))) + i-- + dAtA[i] = 0x32 + } + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintPool(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x2a + } + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintPool(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintPool(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintPool(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintPool(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintPool(dAtA []byte, offset int, v uint64) int { + offset -= sovPool(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Protocol) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Binaries) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + if m.LastUpgrade != 0 { + n += 1 + sovPool(uint64(m.LastUpgrade)) + } + return n +} + +func (m *UpgradePlan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Binaries) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + if m.ScheduledAt != 0 { + n += 1 + sovPool(uint64(m.ScheduledAt)) + } + if m.Duration != 0 { + n += 1 + sovPool(uint64(m.Duration)) + } + return n +} + +func (m *Funder) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovPool(uint64(m.Amount)) + } + return n +} + +func (m *Pool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPool(uint64(m.Id)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.CurrentKey) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + l = len(m.CurrentSummary) + if l > 0 { + n += 1 + l + sovPool(uint64(l)) + } + if m.CurrentIndex != 0 { + n += 1 + sovPool(uint64(m.CurrentIndex)) + } + if m.TotalBundles != 0 { + n += 1 + sovPool(uint64(m.TotalBundles)) + } + if m.UploadInterval != 0 { + n += 1 + sovPool(uint64(m.UploadInterval)) + } + if m.OperatingCost != 0 { + n += 1 + sovPool(uint64(m.OperatingCost)) + } + if m.MinDelegation != 0 { + n += 1 + sovPool(uint64(m.MinDelegation)) + } + if m.MaxBundleSize != 0 { + n += 1 + sovPool(uint64(m.MaxBundleSize)) + } + if m.Disabled { + n += 2 + } + if len(m.Funders) > 0 { + for _, e := range m.Funders { + l = e.Size() + n += 2 + l + 
sovPool(uint64(l)) + } + } + if m.TotalFunds != 0 { + n += 2 + sovPool(uint64(m.TotalFunds)) + } + if m.Protocol != nil { + l = m.Protocol.Size() + n += 2 + l + sovPool(uint64(l)) + } + if m.UpgradePlan != nil { + l = m.UpgradePlan.Size() + n += 2 + l + sovPool(uint64(l)) + } + if m.CurrentStorageProviderId != 0 { + n += 2 + sovPool(uint64(m.CurrentStorageProviderId)) + } + if m.CurrentCompressionId != 0 { + n += 2 + sovPool(uint64(m.CurrentCompressionId)) + } + return n +} + +func sovPool(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPool(x uint64) (n int) { + return sovPool(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Protocol) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Protocol: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Protocol: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binaries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Binaries = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpgrade", wireType) + } + m.LastUpgrade = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastUpgrade |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpgradePlan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpgradePlan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpgradePlan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binaries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Binaries = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduledAt", wireType) + } + m.ScheduledAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ScheduledAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Funder) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Funder: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Funder: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentSummary", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.CurrentSummary = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentIndex", wireType) + } + m.CurrentIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalBundles", wireType) + } + m.TotalBundles = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalBundles |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadInterval", wireType) + } + m.UploadInterval = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadInterval |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingCost", wireType) + } + m.OperatingCost = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OperatingCost |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDelegation", wireType) + } + m.MinDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBundleSize", wireType) + } + m.MaxBundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Funders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Funders = append(m.Funders, &Funder{}) + if err := m.Funders[len(m.Funders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 
17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalFunds", wireType) + } + m.TotalFunds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalFunds |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Protocol == nil { + m.Protocol = &Protocol{} + } + if err := m.Protocol.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpgradePlan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPool + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPool + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpgradePlan == nil { + m.UpgradePlan = &UpgradePlan{} + } + if err := m.UpgradePlan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentStorageProviderId", wireType) + } + m.CurrentStorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentStorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCompressionId", wireType) + } + m.CurrentCompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPool + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentCompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPool(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPool + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPool(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPool + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthPool + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPool + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPool + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPool = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPool = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPool = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/pool/types/tx.pb.go b/x/pool/types/tx.pb.go new file mode 100644 index 00000000..161f9336 --- /dev/null +++ b/x/pool/types/tx.pb.go @@ -0,0 +1,4053 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/pool/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgFundPool defines a SDK message for funding a pool. +type MsgFundPool struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // amount ... 
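The decoding loops above are plain protobuf varints: each byte carries seven payload bits, the high bit signals continuation, and sovPool sizes a value as (Len64(x|1)+6)/7 bytes. A minimal, standalone illustration of the same scheme follows; it is not part of the generated file, and the helper names are made up for this sketch.

package main

import "fmt"

// putUvarint mirrors the wire format the generated code emits: seven
// payload bits per byte, high bit set while more bytes follow.
func putUvarint(x uint64) []byte {
	var out []byte
	for x >= 0x80 {
		out = append(out, byte(x)|0x80)
		x >>= 7
	}
	return append(out, byte(x))
}

// getUvarint is the inverse, matching the shift/|= loops in Unmarshal above.
func getUvarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7F) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	enc := putUvarint(300) // 300 -> 0xAC 0x02 on the wire; sovPool(300) = (9+6)/7 = 2
	val, n := getUvarint(enc)
	fmt.Printf("% x -> %d (%d bytes)\n", enc, val, n) // ac 02 -> 300 (2 bytes)
}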
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgFundPool) Reset() { *m = MsgFundPool{} } +func (m *MsgFundPool) String() string { return proto.CompactTextString(m) } +func (*MsgFundPool) ProtoMessage() {} +func (*MsgFundPool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{0} +} +func (m *MsgFundPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgFundPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgFundPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgFundPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgFundPool.Merge(m, src) +} +func (m *MsgFundPool) XXX_Size() int { + return m.Size() +} +func (m *MsgFundPool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgFundPool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgFundPool proto.InternalMessageInfo + +func (m *MsgFundPool) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgFundPool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgFundPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgFundPoolResponse defines the Msg/DefundPool response type. +type MsgFundPoolResponse struct { +} + +func (m *MsgFundPoolResponse) Reset() { *m = MsgFundPoolResponse{} } +func (m *MsgFundPoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgFundPoolResponse) ProtoMessage() {} +func (*MsgFundPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{1} +} +func (m *MsgFundPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgFundPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgFundPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgFundPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgFundPoolResponse.Merge(m, src) +} +func (m *MsgFundPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgFundPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgFundPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgFundPoolResponse proto.InternalMessageInfo + +// MsgDefundPool defines a SDK message for defunding a pool. +type MsgDefundPool struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // amount ... 
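MsgFundPool and MsgDefundPool share the same shape: a creator address, a pool id, and an amount. A small round-trip sketch over the generated byte-level API is shown below; the import path is inferred from the module name and the Marshal method is generated further down in this file, so treat both as assumptions.

package main

import (
	"fmt"

	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
)

func main() {
	msg := &pooltypes.MsgFundPool{
		Creator: "kyve1...", // placeholder bech32 address
		Id:      0,
		Amount:  1_000_000,
	}

	// Encode with the generated marshaller ...
	bz, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	// ... decode into a fresh value, and confirm Size() matches the wire length.
	var decoded pooltypes.MsgFundPool
	if err := decoded.Unmarshal(bz); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetAmount(), msg.Size() == len(bz)) // 1000000 true
}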
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgDefundPool) Reset() { *m = MsgDefundPool{} } +func (m *MsgDefundPool) String() string { return proto.CompactTextString(m) } +func (*MsgDefundPool) ProtoMessage() {} +func (*MsgDefundPool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{2} +} +func (m *MsgDefundPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDefundPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDefundPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDefundPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDefundPool.Merge(m, src) +} +func (m *MsgDefundPool) XXX_Size() int { + return m.Size() +} +func (m *MsgDefundPool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDefundPool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDefundPool proto.InternalMessageInfo + +func (m *MsgDefundPool) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgDefundPool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgDefundPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgDefundPoolResponse defines the Msg/DefundPool response type. +type MsgDefundPoolResponse struct { +} + +func (m *MsgDefundPoolResponse) Reset() { *m = MsgDefundPoolResponse{} } +func (m *MsgDefundPoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDefundPoolResponse) ProtoMessage() {} +func (*MsgDefundPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{3} +} +func (m *MsgDefundPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDefundPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDefundPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDefundPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDefundPoolResponse.Merge(m, src) +} +func (m *MsgDefundPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDefundPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDefundPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDefundPoolResponse proto.InternalMessageInfo + +// MsgCreatePool defines a SDK message for creating a new pool. +type MsgCreatePool struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // name ... + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // runtime ... + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + // logo ... + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` + // config ... + Config string `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` + // start_key ... + StartKey string `protobuf:"bytes,6,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` + // upload_interval ... 
+ UploadInterval uint64 `protobuf:"varint,7,opt,name=upload_interval,json=uploadInterval,proto3" json:"upload_interval,omitempty"` + // operating_cost ... + OperatingCost uint64 `protobuf:"varint,8,opt,name=operating_cost,json=operatingCost,proto3" json:"operating_cost,omitempty"` + // min_delegation ... + MinDelegation uint64 `protobuf:"varint,9,opt,name=min_delegation,json=minDelegation,proto3" json:"min_delegation,omitempty"` + // max_bundle_size ... + MaxBundleSize uint64 `protobuf:"varint,10,opt,name=max_bundle_size,json=maxBundleSize,proto3" json:"max_bundle_size,omitempty"` + // version ... + Version string `protobuf:"bytes,11,opt,name=version,proto3" json:"version,omitempty"` + // binaries ... + Binaries string `protobuf:"bytes,12,opt,name=binaries,proto3" json:"binaries,omitempty"` + // storage_provider_id ... + StorageProviderId uint32 `protobuf:"varint,13,opt,name=storage_provider_id,json=storageProviderId,proto3" json:"storage_provider_id,omitempty"` + // compression_id ... + CompressionId uint32 `protobuf:"varint,14,opt,name=compression_id,json=compressionId,proto3" json:"compression_id,omitempty"` +} + +func (m *MsgCreatePool) Reset() { *m = MsgCreatePool{} } +func (m *MsgCreatePool) String() string { return proto.CompactTextString(m) } +func (*MsgCreatePool) ProtoMessage() {} +func (*MsgCreatePool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{4} +} +func (m *MsgCreatePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreatePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreatePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreatePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreatePool.Merge(m, src) +} +func (m *MsgCreatePool) XXX_Size() int { + return m.Size() +} +func (m *MsgCreatePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreatePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreatePool proto.InternalMessageInfo + +func (m *MsgCreatePool) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgCreatePool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MsgCreatePool) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *MsgCreatePool) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +func (m *MsgCreatePool) GetConfig() string { + if m != nil { + return m.Config + } + return "" +} + +func (m *MsgCreatePool) GetStartKey() string { + if m != nil { + return m.StartKey + } + return "" +} + +func (m *MsgCreatePool) GetUploadInterval() uint64 { + if m != nil { + return m.UploadInterval + } + return 0 +} + +func (m *MsgCreatePool) GetOperatingCost() uint64 { + if m != nil { + return m.OperatingCost + } + return 0 +} + +func (m *MsgCreatePool) GetMinDelegation() uint64 { + if m != nil { + return m.MinDelegation + } + return 0 +} + +func (m *MsgCreatePool) GetMaxBundleSize() uint64 { + if m != nil { + return m.MaxBundleSize + } + return 0 +} + +func (m *MsgCreatePool) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *MsgCreatePool) GetBinaries() string { + if m != nil { + return m.Binaries + } + return "" +} + +func (m *MsgCreatePool) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +func (m 
*MsgCreatePool) GetCompressionId() uint32 { + if m != nil { + return m.CompressionId + } + return 0 +} + +// MsgCreatePoolResponse defines the Msg/CreatePool response type. +type MsgCreatePoolResponse struct { +} + +func (m *MsgCreatePoolResponse) Reset() { *m = MsgCreatePoolResponse{} } +func (m *MsgCreatePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreatePoolResponse) ProtoMessage() {} +func (*MsgCreatePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{5} +} +func (m *MsgCreatePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreatePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreatePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreatePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreatePoolResponse.Merge(m, src) +} +func (m *MsgCreatePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreatePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreatePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreatePoolResponse proto.InternalMessageInfo + +// MsgUpdatePool defines a SDK message for updating an existing pool. +type MsgUpdatePool struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // payload ... + Payload string `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *MsgUpdatePool) Reset() { *m = MsgUpdatePool{} } +func (m *MsgUpdatePool) String() string { return proto.CompactTextString(m) } +func (*MsgUpdatePool) ProtoMessage() {} +func (*MsgUpdatePool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{6} +} +func (m *MsgUpdatePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdatePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdatePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdatePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdatePool.Merge(m, src) +} +func (m *MsgUpdatePool) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdatePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdatePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdatePool proto.InternalMessageInfo + +func (m *MsgUpdatePool) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdatePool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgUpdatePool) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// MsgUpdatePoolResponse defines the Msg/UpdatePool response type. 
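MsgCreatePool and MsgUpdatePool are governance messages: the authority field must be the x/gov module account, so they travel inside a proposal rather than being signed by a regular key, and MsgUpdatePool's payload is an opaque string whose schema is defined by the pool keeper rather than by this file. The sketch below packs a MsgCreatePool into a protobuf Any for such a proposal; the authority and runtime values are placeholders and the surrounding proposal-submission wiring is assumed, not shown here.

package main

import (
	"fmt"

	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
)

func main() {
	msg := &pooltypes.MsgCreatePool{
		Authority:         "kyve1...",        // x/gov module account (placeholder)
		Name:              "Example Pool",
		Runtime:           "@kyvejs/example", // hypothetical runtime identifier
		UploadInterval:    60,
		MaxBundleSize:     100,
		Version:           "1.0.0",
		Binaries:          "{}",
		StorageProviderId: 1,
		CompressionId:     1,
	}

	// Governance proposals carry their messages as protobuf Any values;
	// the type URL comes from the names registered in init() further below.
	anyMsg, err := codectypes.NewAnyWithValue(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(anyMsg.TypeUrl) // /kyve.pool.v1beta1.MsgCreatePool
}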
+type MsgUpdatePoolResponse struct { +} + +func (m *MsgUpdatePoolResponse) Reset() { *m = MsgUpdatePoolResponse{} } +func (m *MsgUpdatePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdatePoolResponse) ProtoMessage() {} +func (*MsgUpdatePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{7} +} +func (m *MsgUpdatePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdatePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdatePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdatePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdatePoolResponse.Merge(m, src) +} +func (m *MsgUpdatePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdatePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdatePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdatePoolResponse proto.InternalMessageInfo + +// MsgDisablePool defines a SDK message for disabling an existing pool. +type MsgDisablePool struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *MsgDisablePool) Reset() { *m = MsgDisablePool{} } +func (m *MsgDisablePool) String() string { return proto.CompactTextString(m) } +func (*MsgDisablePool) ProtoMessage() {} +func (*MsgDisablePool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{8} +} +func (m *MsgDisablePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDisablePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDisablePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDisablePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDisablePool.Merge(m, src) +} +func (m *MsgDisablePool) XXX_Size() int { + return m.Size() +} +func (m *MsgDisablePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDisablePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDisablePool proto.InternalMessageInfo + +func (m *MsgDisablePool) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgDisablePool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// MsgDisablePoolResponse defines the Msg/DisablePool response type. 
+type MsgDisablePoolResponse struct { +} + +func (m *MsgDisablePoolResponse) Reset() { *m = MsgDisablePoolResponse{} } +func (m *MsgDisablePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDisablePoolResponse) ProtoMessage() {} +func (*MsgDisablePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{9} +} +func (m *MsgDisablePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDisablePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDisablePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDisablePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDisablePoolResponse.Merge(m, src) +} +func (m *MsgDisablePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDisablePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDisablePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDisablePoolResponse proto.InternalMessageInfo + +// MsgEnablePool defines a SDK message for enabling an existing pool. +type MsgEnablePool struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *MsgEnablePool) Reset() { *m = MsgEnablePool{} } +func (m *MsgEnablePool) String() string { return proto.CompactTextString(m) } +func (*MsgEnablePool) ProtoMessage() {} +func (*MsgEnablePool) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{10} +} +func (m *MsgEnablePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgEnablePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgEnablePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgEnablePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgEnablePool.Merge(m, src) +} +func (m *MsgEnablePool) XXX_Size() int { + return m.Size() +} +func (m *MsgEnablePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgEnablePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgEnablePool proto.InternalMessageInfo + +func (m *MsgEnablePool) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgEnablePool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// MsgEnablePoolResponse defines the Msg/EnablePool response type. 
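MsgDisablePool and MsgEnablePool carry nothing but the authority and a pool id, so the corresponding message server reduces to an authority check plus a flip of the pool's Disabled flag (the field decoded as case 15 in pool.pb.go above). The real handler lives in the x/pool keeper and is not shown here; the store interface and error texts below are assumptions used only to sketch the guard.

package poolsketch

import (
	"fmt"

	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
)

// poolStore names only what the guard below needs; the real keeper in
// x/pool/keeper provides the actual implementations.
type poolStore interface {
	GetPool(id uint64) (pooltypes.Pool, bool)
	SetPool(pool pooltypes.Pool)
}

// disablePool sketches the governance guard: only the configured authority
// (the x/gov module account) may flip a pool's Disabled flag.
func disablePool(store poolStore, authority string, msg *pooltypes.MsgDisablePool) (*pooltypes.MsgDisablePoolResponse, error) {
	if msg.Authority != authority {
		return nil, fmt.Errorf("invalid authority: expected %s, got %s", authority, msg.Authority)
	}

	pool, found := store.GetPool(msg.Id)
	if !found {
		return nil, fmt.Errorf("pool %d not found", msg.Id)
	}

	pool.Disabled = true
	store.SetPool(pool)

	return &pooltypes.MsgDisablePoolResponse{}, nil
}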
+type MsgEnablePoolResponse struct { +} + +func (m *MsgEnablePoolResponse) Reset() { *m = MsgEnablePoolResponse{} } +func (m *MsgEnablePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgEnablePoolResponse) ProtoMessage() {} +func (*MsgEnablePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{11} +} +func (m *MsgEnablePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgEnablePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgEnablePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgEnablePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgEnablePoolResponse.Merge(m, src) +} +func (m *MsgEnablePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgEnablePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgEnablePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgEnablePoolResponse proto.InternalMessageInfo + +// MsgScheduleRuntimeUpgrade defines a SDK message for scheduling a runtime upgrade. +type MsgScheduleRuntimeUpgrade struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // runtime ... + Runtime string `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty"` + // version ... + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + // scheduled_at ... + ScheduledAt uint64 `protobuf:"varint,4,opt,name=scheduled_at,json=scheduledAt,proto3" json:"scheduled_at,omitempty"` + // duration ... + Duration uint64 `protobuf:"varint,5,opt,name=duration,proto3" json:"duration,omitempty"` + // binaries ... 
+ Binaries string `protobuf:"bytes,6,opt,name=binaries,proto3" json:"binaries,omitempty"` +} + +func (m *MsgScheduleRuntimeUpgrade) Reset() { *m = MsgScheduleRuntimeUpgrade{} } +func (m *MsgScheduleRuntimeUpgrade) String() string { return proto.CompactTextString(m) } +func (*MsgScheduleRuntimeUpgrade) ProtoMessage() {} +func (*MsgScheduleRuntimeUpgrade) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{12} +} +func (m *MsgScheduleRuntimeUpgrade) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgScheduleRuntimeUpgrade) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgScheduleRuntimeUpgrade.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgScheduleRuntimeUpgrade) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgScheduleRuntimeUpgrade.Merge(m, src) +} +func (m *MsgScheduleRuntimeUpgrade) XXX_Size() int { + return m.Size() +} +func (m *MsgScheduleRuntimeUpgrade) XXX_DiscardUnknown() { + xxx_messageInfo_MsgScheduleRuntimeUpgrade.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgScheduleRuntimeUpgrade proto.InternalMessageInfo + +func (m *MsgScheduleRuntimeUpgrade) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgScheduleRuntimeUpgrade) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *MsgScheduleRuntimeUpgrade) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *MsgScheduleRuntimeUpgrade) GetScheduledAt() uint64 { + if m != nil { + return m.ScheduledAt + } + return 0 +} + +func (m *MsgScheduleRuntimeUpgrade) GetDuration() uint64 { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *MsgScheduleRuntimeUpgrade) GetBinaries() string { + if m != nil { + return m.Binaries + } + return "" +} + +// MsgScheduleRuntimeUpgradeResponse defines the Msg/ScheduleRuntimeUpgrade response type. +type MsgScheduleRuntimeUpgradeResponse struct { +} + +func (m *MsgScheduleRuntimeUpgradeResponse) Reset() { *m = MsgScheduleRuntimeUpgradeResponse{} } +func (m *MsgScheduleRuntimeUpgradeResponse) String() string { return proto.CompactTextString(m) } +func (*MsgScheduleRuntimeUpgradeResponse) ProtoMessage() {} +func (*MsgScheduleRuntimeUpgradeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{13} +} +func (m *MsgScheduleRuntimeUpgradeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgScheduleRuntimeUpgradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgScheduleRuntimeUpgradeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgScheduleRuntimeUpgradeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgScheduleRuntimeUpgradeResponse.Merge(m, src) +} +func (m *MsgScheduleRuntimeUpgradeResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgScheduleRuntimeUpgradeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgScheduleRuntimeUpgradeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgScheduleRuntimeUpgradeResponse proto.InternalMessageInfo + +// MsgCancelRuntimeUpgrade defines a SDK message for cancelling a runtime upgrade. 
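MsgScheduleRuntimeUpgrade feeds the UpgradePlan that pool.pb.go decodes above: pools running the named runtime pick up the new version and binaries once scheduled_at is reached, with duration as the upgrade window. A construction sketch follows; interpreting scheduled_at and duration as Unix seconds, and binaries as a JSON map of platform to download URL, are conventions assumed here rather than anything this file enforces.

package main

import (
	"fmt"
	"time"

	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
)

func main() {
	msg := &pooltypes.MsgScheduleRuntimeUpgrade{
		Authority:   "kyve1...",        // x/gov module account (placeholder)
		Runtime:     "@kyvejs/example", // hypothetical runtime identifier
		Version:     "1.1.0",
		ScheduledAt: uint64(time.Now().Add(48 * time.Hour).Unix()), // assumed: Unix seconds
		Duration:    3600,                                          // assumed: seconds
		Binaries:    `{"kyve-linux-x64":"https://example.com/runtime-1.1.0.zip"}`, // assumed JSON layout
	}
	fmt.Println(msg.GetRuntime(), msg.GetVersion(), msg.GetScheduledAt())
}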
+type MsgCancelRuntimeUpgrade struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // runtime ... + Runtime string `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty"` +} + +func (m *MsgCancelRuntimeUpgrade) Reset() { *m = MsgCancelRuntimeUpgrade{} } +func (m *MsgCancelRuntimeUpgrade) String() string { return proto.CompactTextString(m) } +func (*MsgCancelRuntimeUpgrade) ProtoMessage() {} +func (*MsgCancelRuntimeUpgrade) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{14} +} +func (m *MsgCancelRuntimeUpgrade) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCancelRuntimeUpgrade) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCancelRuntimeUpgrade.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCancelRuntimeUpgrade) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCancelRuntimeUpgrade.Merge(m, src) +} +func (m *MsgCancelRuntimeUpgrade) XXX_Size() int { + return m.Size() +} +func (m *MsgCancelRuntimeUpgrade) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCancelRuntimeUpgrade.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCancelRuntimeUpgrade proto.InternalMessageInfo + +func (m *MsgCancelRuntimeUpgrade) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgCancelRuntimeUpgrade) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +// MsgCancelRuntimeUpgradeResponse defines the Msg/CancelRuntimeUpgrade response type. +type MsgCancelRuntimeUpgradeResponse struct { +} + +func (m *MsgCancelRuntimeUpgradeResponse) Reset() { *m = MsgCancelRuntimeUpgradeResponse{} } +func (m *MsgCancelRuntimeUpgradeResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCancelRuntimeUpgradeResponse) ProtoMessage() {} +func (*MsgCancelRuntimeUpgradeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_20ddefdf83388ddc, []int{15} +} +func (m *MsgCancelRuntimeUpgradeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCancelRuntimeUpgradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCancelRuntimeUpgradeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCancelRuntimeUpgradeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCancelRuntimeUpgradeResponse.Merge(m, src) +} +func (m *MsgCancelRuntimeUpgradeResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCancelRuntimeUpgradeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCancelRuntimeUpgradeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCancelRuntimeUpgradeResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgFundPool)(nil), "kyve.pool.v1beta1.MsgFundPool") + proto.RegisterType((*MsgFundPoolResponse)(nil), "kyve.pool.v1beta1.MsgFundPoolResponse") + proto.RegisterType((*MsgDefundPool)(nil), "kyve.pool.v1beta1.MsgDefundPool") + proto.RegisterType((*MsgDefundPoolResponse)(nil), "kyve.pool.v1beta1.MsgDefundPoolResponse") + proto.RegisterType((*MsgCreatePool)(nil), "kyve.pool.v1beta1.MsgCreatePool") + 
proto.RegisterType((*MsgCreatePoolResponse)(nil), "kyve.pool.v1beta1.MsgCreatePoolResponse") + proto.RegisterType((*MsgUpdatePool)(nil), "kyve.pool.v1beta1.MsgUpdatePool") + proto.RegisterType((*MsgUpdatePoolResponse)(nil), "kyve.pool.v1beta1.MsgUpdatePoolResponse") + proto.RegisterType((*MsgDisablePool)(nil), "kyve.pool.v1beta1.MsgDisablePool") + proto.RegisterType((*MsgDisablePoolResponse)(nil), "kyve.pool.v1beta1.MsgDisablePoolResponse") + proto.RegisterType((*MsgEnablePool)(nil), "kyve.pool.v1beta1.MsgEnablePool") + proto.RegisterType((*MsgEnablePoolResponse)(nil), "kyve.pool.v1beta1.MsgEnablePoolResponse") + proto.RegisterType((*MsgScheduleRuntimeUpgrade)(nil), "kyve.pool.v1beta1.MsgScheduleRuntimeUpgrade") + proto.RegisterType((*MsgScheduleRuntimeUpgradeResponse)(nil), "kyve.pool.v1beta1.MsgScheduleRuntimeUpgradeResponse") + proto.RegisterType((*MsgCancelRuntimeUpgrade)(nil), "kyve.pool.v1beta1.MsgCancelRuntimeUpgrade") + proto.RegisterType((*MsgCancelRuntimeUpgradeResponse)(nil), "kyve.pool.v1beta1.MsgCancelRuntimeUpgradeResponse") +} + +func init() { proto.RegisterFile("kyve/pool/v1beta1/tx.proto", fileDescriptor_20ddefdf83388ddc) } + +var fileDescriptor_20ddefdf83388ddc = []byte{ + // 841 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0x8e, 0x93, 0xd4, 0x89, 0x4f, 0x6a, 0x57, 0x9d, 0xb6, 0xe9, 0x74, 0x91, 0x4c, 0x62, 0x44, + 0x49, 0x11, 0xd8, 0x6a, 0x41, 0xdc, 0x37, 0x69, 0x91, 0xa2, 0x2a, 0x50, 0x36, 0x2a, 0x14, 0xb8, + 0x58, 0x8d, 0x77, 0xa6, 0x9b, 0x51, 0x76, 0x67, 0x96, 0x99, 0x59, 0x13, 0x57, 0x3c, 0x04, 0x17, + 0x3c, 0x0a, 0x0f, 0xc1, 0x65, 0xc5, 0x15, 0x97, 0x28, 0xb9, 0xe3, 0x29, 0xd0, 0xcc, 0xae, 0x77, + 0xd7, 0xd8, 0x4b, 0xf9, 0x49, 0xef, 0x7c, 0xce, 0xf9, 0xf2, 0x9d, 0x6f, 0x66, 0xbe, 0x73, 0xb2, + 0xe0, 0x9d, 0x4e, 0x27, 0x6c, 0x94, 0x4a, 0x19, 0x8f, 0x26, 0xf7, 0xc7, 0xcc, 0x90, 0xfb, 0x23, + 0x73, 0x36, 0x4c, 0x95, 0x34, 0x12, 0x5d, 0xb7, 0xb5, 0xa1, 0xad, 0x0d, 0x8b, 0x9a, 0x77, 0x27, + 0x94, 0x3a, 0x91, 0x3a, 0x70, 0x80, 0x51, 0x1e, 0xe4, 0xe8, 0xc1, 0xe7, 0xb0, 0x75, 0xa4, 0xa3, + 0x4f, 0x33, 0x41, 0x9f, 0x4a, 0x19, 0x23, 0x0c, 0x1b, 0xa1, 0x62, 0xc4, 0x48, 0x85, 0x5b, 0x3b, + 0xad, 0xbd, 0x8e, 0x3f, 0x0b, 0x51, 0x0f, 0x56, 0x39, 0xc5, 0xab, 0x3b, 0xad, 0xbd, 0x75, 0x7f, + 0x95, 0x53, 0xb4, 0x0d, 0x6d, 0x92, 0xc8, 0x4c, 0x18, 0xbc, 0xe6, 0x72, 0x45, 0x34, 0xb8, 0x05, + 0x37, 0x6a, 0x84, 0x3e, 0xd3, 0xa9, 0x14, 0x9a, 0x0d, 0xbe, 0x80, 0xee, 0x91, 0x8e, 0x1e, 0xb1, + 0x17, 0x97, 0xd7, 0xe9, 0x36, 0xdc, 0x9a, 0xa3, 0x2c, 0x7b, 0xfd, 0xb1, 0xe6, 0x9a, 0x1d, 0x58, + 0x3e, 0xe6, 0x9a, 0x7d, 0x02, 0x1d, 0x92, 0x99, 0x13, 0xa9, 0xb8, 0x99, 0xe6, 0xed, 0xf6, 0xf1, + 0xaf, 0x3f, 0x7f, 0x78, 0xb3, 0xb8, 0x8a, 0x87, 0x94, 0x2a, 0xa6, 0xf5, 0xb1, 0x51, 0x5c, 0x44, + 0x7e, 0x05, 0x45, 0x08, 0xd6, 0x05, 0x49, 0x98, 0x13, 0xd3, 0xf1, 0xdd, 0x6f, 0x2b, 0x5c, 0x65, + 0xc2, 0xf0, 0x84, 0x39, 0x3d, 0x1d, 0x7f, 0x16, 0x5a, 0x74, 0x2c, 0x23, 0x89, 0xd7, 0x73, 0xb4, + 0xfd, 0x6d, 0xc5, 0x87, 0x52, 0xbc, 0xe0, 0x11, 0xbe, 0xe2, 0xb2, 0x45, 0x84, 0xde, 0x82, 0x8e, + 0x36, 0x44, 0x99, 0xe0, 0x94, 0x4d, 0x71, 0xdb, 0x95, 0x36, 0x5d, 0xe2, 0x09, 0x9b, 0xa2, 0xf7, + 0xe0, 0x5a, 0x96, 0xc6, 0x92, 0xd0, 0x80, 0x0b, 0xc3, 0xd4, 0x84, 0xc4, 0x78, 0xc3, 0x1d, 0xbd, + 0x97, 0xa7, 0x0f, 0x8b, 0x2c, 0x7a, 0x17, 0x7a, 0x32, 0x65, 0x8a, 0x18, 0x2e, 0xa2, 0x20, 0x94, + 0xda, 0xe0, 0x4d, 0x87, 0xeb, 0x96, 0xd9, 0x03, 0xa9, 0x8d, 0x85, 0x25, 0x5c, 0x04, 0x94, 0xc5, + 0x2c, 0x22, 0x86, 0x4b, 0x81, 0x3b, 0x39, 
0x2c, 0xe1, 0xe2, 0x51, 0x99, 0x44, 0x77, 0xe1, 0x5a, + 0x42, 0xce, 0x82, 0x71, 0x26, 0x68, 0xcc, 0x02, 0xcd, 0x5f, 0x32, 0x0c, 0x05, 0x8e, 0x9c, 0xed, + 0xbb, 0xec, 0x31, 0x7f, 0xe9, 0x6e, 0x60, 0xc2, 0x94, 0xb6, 0x3c, 0x5b, 0xf9, 0x0d, 0x14, 0x21, + 0xf2, 0x60, 0x73, 0xcc, 0x05, 0x51, 0x9c, 0x69, 0x7c, 0x35, 0x3f, 0xd4, 0x2c, 0x46, 0x43, 0xb8, + 0xa1, 0x8d, 0x54, 0x24, 0x62, 0xd6, 0x87, 0x13, 0x4e, 0x99, 0x0a, 0x38, 0xc5, 0xdd, 0x9d, 0xd6, + 0x5e, 0xd7, 0xbf, 0x5e, 0x94, 0x9e, 0x16, 0x95, 0x43, 0x6a, 0x45, 0x87, 0x32, 0x49, 0xed, 0xc3, + 0x70, 0x29, 0x2c, 0xb4, 0xe7, 0xa0, 0xdd, 0x5a, 0xf6, 0x90, 0x16, 0x2e, 0xa8, 0xde, 0xba, 0x74, + 0xc1, 0x77, 0xce, 0x04, 0xcf, 0x52, 0xfa, 0x7f, 0x4d, 0xf0, 0x57, 0x3f, 0x62, 0xd8, 0x48, 0xc9, + 0xd4, 0xbe, 0xc3, 0xcc, 0x00, 0x45, 0x58, 0x68, 0xa9, 0x5a, 0x96, 0x5a, 0x9e, 0x43, 0xcf, 0x5a, + 0x95, 0x6b, 0x32, 0x8e, 0x2f, 0x55, 0xcc, 0x00, 0xc3, 0xf6, 0x3c, 0x73, 0xd9, 0xf3, 0x2b, 0x77, + 0xfe, 0xc7, 0xe2, 0xd2, 0x5b, 0xe6, 0xa7, 0xac, 0x88, 0xcb, 0x8e, 0xe7, 0x2d, 0xb8, 0x73, 0xa4, + 0xa3, 0xe3, 0xf0, 0x84, 0xd1, 0x2c, 0x66, 0x7e, 0x3e, 0x16, 0xcf, 0xd2, 0x48, 0x11, 0xca, 0xfe, + 0x73, 0xfb, 0xda, 0xbc, 0xad, 0xce, 0xcf, 0x5b, 0xcd, 0x87, 0x6b, 0xf3, 0x3e, 0xdc, 0x85, 0xab, + 0xba, 0x50, 0x41, 0x03, 0x62, 0xdc, 0x44, 0xae, 0xfb, 0x5b, 0x65, 0xee, 0xa1, 0xb1, 0x56, 0xa5, + 0x99, 0xca, 0xa7, 0xe1, 0x8a, 0x2b, 0x97, 0xf1, 0x9c, 0x8d, 0xdb, 0xf3, 0x36, 0x1e, 0xbc, 0x03, + 0xbb, 0x8d, 0x67, 0x2c, 0x6f, 0xe2, 0x14, 0x6e, 0x5b, 0x53, 0x12, 0x11, 0xb2, 0xf8, 0x4d, 0x5f, + 0xc3, 0x60, 0x17, 0xde, 0x6e, 0x68, 0x36, 0xd3, 0xf3, 0xe0, 0xa7, 0x36, 0xac, 0x1d, 0xe9, 0x08, + 0xf9, 0xb0, 0x59, 0xae, 0xfa, 0xfe, 0x70, 0xe1, 0x1f, 0xc5, 0xb0, 0xb6, 0xb9, 0xbd, 0xbb, 0x7f, + 0x5f, 0x9f, 0x71, 0xa3, 0xe7, 0x00, 0xb5, 0xb5, 0xbe, 0xb3, 0xfc, 0xaf, 0x2a, 0x84, 0xb7, 0xf7, + 0x3a, 0x44, 0x9d, 0xb9, 0xb6, 0xc3, 0x1b, 0x98, 0x2b, 0x44, 0x13, 0xf3, 0xe2, 0x6e, 0xb0, 0xcc, + 0xb5, 0xc5, 0xd0, 0xc0, 0x5c, 0x21, 0x9a, 0x98, 0x17, 0x27, 0x1d, 0x7d, 0x0b, 0x5b, 0xf5, 0x31, + 0xdf, 0x6d, 0x38, 0x6c, 0x05, 0xf1, 0xee, 0xbd, 0x16, 0x52, 0x97, 0x5d, 0x9b, 0xe7, 0x06, 0xd9, + 0x15, 0xa2, 0x49, 0xf6, 0xe2, 0xe8, 0xa2, 0x1f, 0x60, 0xbb, 0x61, 0x6c, 0x3f, 0x58, 0xce, 0xb1, + 0x1c, 0xed, 0x7d, 0xfc, 0x6f, 0xd0, 0x65, 0xf7, 0x09, 0xdc, 0x5c, 0x3a, 0x2b, 0xef, 0x37, 0x3c, + 0xe8, 0x12, 0xac, 0xf7, 0xe0, 0x9f, 0x63, 0x67, 0x7d, 0xf7, 0x0f, 0x7e, 0x39, 0xef, 0xb7, 0x5e, + 0x9d, 0xf7, 0x5b, 0xbf, 0x9f, 0xf7, 0x5b, 0x3f, 0x5e, 0xf4, 0x57, 0x5e, 0x5d, 0xf4, 0x57, 0x7e, + 0xbb, 0xe8, 0xaf, 0x7c, 0x73, 0x2f, 0xe2, 0xe6, 0x24, 0x1b, 0x0f, 0x43, 0x99, 0x8c, 0x9e, 0x7c, + 0xfd, 0xe5, 0xe3, 0xcf, 0x98, 0xf9, 0x5e, 0xaa, 0xd3, 0x51, 0x78, 0x42, 0xb8, 0x18, 0x9d, 0xe5, + 0x9f, 0x5e, 0x66, 0x9a, 0x32, 0x3d, 0x6e, 0xbb, 0x0f, 0xa9, 0x8f, 0xfe, 0x0c, 0x00, 0x00, 0xff, + 0xff, 0x47, 0x15, 0xd0, 0xc0, 0x94, 0x09, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // FundPool ... + FundPool(ctx context.Context, in *MsgFundPool, opts ...grpc.CallOption) (*MsgFundPoolResponse, error) + // DefundPool ... 
+ DefundPool(ctx context.Context, in *MsgDefundPool, opts ...grpc.CallOption) (*MsgDefundPoolResponse, error) + // CreatePool defines a governance operation for creating a new pool. + // The authority is hard-coded to the x/gov module account. + CreatePool(ctx context.Context, in *MsgCreatePool, opts ...grpc.CallOption) (*MsgCreatePoolResponse, error) + // UpdatePool defines a governance operation for updating an existing pool. + // The authority is hard-coded to the x/gov module account. + UpdatePool(ctx context.Context, in *MsgUpdatePool, opts ...grpc.CallOption) (*MsgUpdatePoolResponse, error) + // DisablePool defines a governance operation for disabling an existing pool. + // The authority is hard-coded to the x/gov module account. + DisablePool(ctx context.Context, in *MsgDisablePool, opts ...grpc.CallOption) (*MsgDisablePoolResponse, error) + // EnablePool defines a governance operation for enabling an existing pool. + // The authority is hard-coded to the x/gov module account. + EnablePool(ctx context.Context, in *MsgEnablePool, opts ...grpc.CallOption) (*MsgEnablePoolResponse, error) + // ScheduleRuntimeUpgrade defines a governance operation for scheduling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + ScheduleRuntimeUpgrade(ctx context.Context, in *MsgScheduleRuntimeUpgrade, opts ...grpc.CallOption) (*MsgScheduleRuntimeUpgradeResponse, error) + // CancelRuntimeUpgrade defines a governance operation for cancelling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + CancelRuntimeUpgrade(ctx context.Context, in *MsgCancelRuntimeUpgrade, opts ...grpc.CallOption) (*MsgCancelRuntimeUpgradeResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) FundPool(ctx context.Context, in *MsgFundPool, opts ...grpc.CallOption) (*MsgFundPoolResponse, error) { + out := new(MsgFundPoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/FundPool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DefundPool(ctx context.Context, in *MsgDefundPool, opts ...grpc.CallOption) (*MsgDefundPoolResponse, error) { + out := new(MsgDefundPoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/DefundPool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreatePool(ctx context.Context, in *MsgCreatePool, opts ...grpc.CallOption) (*MsgCreatePoolResponse, error) { + out := new(MsgCreatePoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/CreatePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdatePool(ctx context.Context, in *MsgUpdatePool, opts ...grpc.CallOption) (*MsgUpdatePoolResponse, error) { + out := new(MsgUpdatePoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/UpdatePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DisablePool(ctx context.Context, in *MsgDisablePool, opts ...grpc.CallOption) (*MsgDisablePoolResponse, error) { + out := new(MsgDisablePoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/DisablePool", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) EnablePool(ctx context.Context, in *MsgEnablePool, opts ...grpc.CallOption) (*MsgEnablePoolResponse, error) { + out := new(MsgEnablePoolResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/EnablePool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ScheduleRuntimeUpgrade(ctx context.Context, in *MsgScheduleRuntimeUpgrade, opts ...grpc.CallOption) (*MsgScheduleRuntimeUpgradeResponse, error) { + out := new(MsgScheduleRuntimeUpgradeResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/ScheduleRuntimeUpgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CancelRuntimeUpgrade(ctx context.Context, in *MsgCancelRuntimeUpgrade, opts ...grpc.CallOption) (*MsgCancelRuntimeUpgradeResponse, error) { + out := new(MsgCancelRuntimeUpgradeResponse) + err := c.cc.Invoke(ctx, "/kyve.pool.v1beta1.Msg/CancelRuntimeUpgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // FundPool ... + FundPool(context.Context, *MsgFundPool) (*MsgFundPoolResponse, error) + // DefundPool ... + DefundPool(context.Context, *MsgDefundPool) (*MsgDefundPoolResponse, error) + // CreatePool defines a governance operation for creating a new pool. + // The authority is hard-coded to the x/gov module account. + CreatePool(context.Context, *MsgCreatePool) (*MsgCreatePoolResponse, error) + // UpdatePool defines a governance operation for updating an existing pool. + // The authority is hard-coded to the x/gov module account. + UpdatePool(context.Context, *MsgUpdatePool) (*MsgUpdatePoolResponse, error) + // DisablePool defines a governance operation for disabling an existing pool. + // The authority is hard-coded to the x/gov module account. + DisablePool(context.Context, *MsgDisablePool) (*MsgDisablePoolResponse, error) + // EnablePool defines a governance operation for enabling an existing pool. + // The authority is hard-coded to the x/gov module account. + EnablePool(context.Context, *MsgEnablePool) (*MsgEnablePoolResponse, error) + // ScheduleRuntimeUpgrade defines a governance operation for scheduling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + ScheduleRuntimeUpgrade(context.Context, *MsgScheduleRuntimeUpgrade) (*MsgScheduleRuntimeUpgradeResponse, error) + // CancelRuntimeUpgrade defines a governance operation for cancelling a runtime upgrade. + // The authority is hard-coded to the x/gov module account. + CancelRuntimeUpgrade(context.Context, *MsgCancelRuntimeUpgrade) (*MsgCancelRuntimeUpgradeResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
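Embedding UnimplementedMsgServer (declared just below) keeps a handler implementation compiling when new RPCs are added, and RegisterMsgServer wires it into anything satisfying the gogo grpc Server interface. The sketch below exercises the generated client and server stubs over an in-memory bufconn connection purely to show that surface; on a running chain these Msg routes are reached through signed transactions rather than direct gRPC calls, and the import path is assumed from the module name.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/test/bufconn"

	pooltypes "github.com/KYVENetwork/chain/x/pool/types"
)

// stubServer overrides only FundPool; the embedded UnimplementedMsgServer
// answers every other RPC with codes.Unimplemented.
type stubServer struct {
	pooltypes.UnimplementedMsgServer
}

func (s *stubServer) FundPool(_ context.Context, msg *pooltypes.MsgFundPool) (*pooltypes.MsgFundPoolResponse, error) {
	fmt.Printf("funding pool %d with %d from %s\n", msg.Id, msg.Amount, msg.Creator)
	return &pooltypes.MsgFundPoolResponse{}, nil
}

func main() {
	// In-memory listener: this only demonstrates the generated stubs.
	lis := bufconn.Listen(1 << 20)
	srv := grpc.NewServer()
	pooltypes.RegisterMsgServer(srv, &stubServer{})
	go func() { _ = srv.Serve(lis) }()
	defer srv.Stop()

	conn, err := grpc.Dial("bufnet",
		grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {
			return lis.DialContext(ctx)
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pooltypes.NewMsgClient(conn)
	if _, err := client.FundPool(context.Background(), &pooltypes.MsgFundPool{
		Creator: "kyve1...", // placeholder bech32 address
		Id:      0,
		Amount:  100,
	}); err != nil {
		log.Fatal(err)
	}
}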
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) FundPool(ctx context.Context, req *MsgFundPool) (*MsgFundPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FundPool not implemented") +} +func (*UnimplementedMsgServer) DefundPool(ctx context.Context, req *MsgDefundPool) (*MsgDefundPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DefundPool not implemented") +} +func (*UnimplementedMsgServer) CreatePool(ctx context.Context, req *MsgCreatePool) (*MsgCreatePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreatePool not implemented") +} +func (*UnimplementedMsgServer) UpdatePool(ctx context.Context, req *MsgUpdatePool) (*MsgUpdatePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdatePool not implemented") +} +func (*UnimplementedMsgServer) DisablePool(ctx context.Context, req *MsgDisablePool) (*MsgDisablePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DisablePool not implemented") +} +func (*UnimplementedMsgServer) EnablePool(ctx context.Context, req *MsgEnablePool) (*MsgEnablePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method EnablePool not implemented") +} +func (*UnimplementedMsgServer) ScheduleRuntimeUpgrade(ctx context.Context, req *MsgScheduleRuntimeUpgrade) (*MsgScheduleRuntimeUpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ScheduleRuntimeUpgrade not implemented") +} +func (*UnimplementedMsgServer) CancelRuntimeUpgrade(ctx context.Context, req *MsgCancelRuntimeUpgrade) (*MsgCancelRuntimeUpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelRuntimeUpgrade not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_FundPool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgFundPool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).FundPool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/FundPool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).FundPool(ctx, req.(*MsgFundPool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DefundPool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDefundPool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DefundPool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/DefundPool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DefundPool(ctx, req.(*MsgDefundPool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CreatePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreatePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreatePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/CreatePool", + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreatePool(ctx, req.(*MsgCreatePool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdatePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdatePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdatePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/UpdatePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdatePool(ctx, req.(*MsgUpdatePool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DisablePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDisablePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DisablePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/DisablePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DisablePool(ctx, req.(*MsgDisablePool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_EnablePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgEnablePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).EnablePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/EnablePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).EnablePool(ctx, req.(*MsgEnablePool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ScheduleRuntimeUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgScheduleRuntimeUpgrade) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ScheduleRuntimeUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/ScheduleRuntimeUpgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ScheduleRuntimeUpgrade(ctx, req.(*MsgScheduleRuntimeUpgrade)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CancelRuntimeUpgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCancelRuntimeUpgrade) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CancelRuntimeUpgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.pool.v1beta1.Msg/CancelRuntimeUpgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CancelRuntimeUpgrade(ctx, req.(*MsgCancelRuntimeUpgrade)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.pool.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: 
[]grpc.MethodDesc{ + { + MethodName: "FundPool", + Handler: _Msg_FundPool_Handler, + }, + { + MethodName: "DefundPool", + Handler: _Msg_DefundPool_Handler, + }, + { + MethodName: "CreatePool", + Handler: _Msg_CreatePool_Handler, + }, + { + MethodName: "UpdatePool", + Handler: _Msg_UpdatePool_Handler, + }, + { + MethodName: "DisablePool", + Handler: _Msg_DisablePool_Handler, + }, + { + MethodName: "EnablePool", + Handler: _Msg_EnablePool_Handler, + }, + { + MethodName: "ScheduleRuntimeUpgrade", + Handler: _Msg_ScheduleRuntimeUpgrade_Handler, + }, + { + MethodName: "CancelRuntimeUpgrade", + Handler: _Msg_CancelRuntimeUpgrade_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/pool/v1beta1/tx.proto", +} + +func (m *MsgFundPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgFundPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgFundPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgFundPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgFundPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgFundPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDefundPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDefundPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDefundPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDefundPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDefundPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDefundPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCreatePool) Marshal() (dAtA []byte, err error) { + size := 
m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreatePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreatePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CompressionId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.CompressionId)) + i-- + dAtA[i] = 0x70 + } + if m.StorageProviderId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x68 + } + if len(m.Binaries) > 0 { + i -= len(m.Binaries) + copy(dAtA[i:], m.Binaries) + i = encodeVarintTx(dAtA, i, uint64(len(m.Binaries))) + i-- + dAtA[i] = 0x62 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTx(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x5a + } + if m.MaxBundleSize != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.MaxBundleSize)) + i-- + dAtA[i] = 0x50 + } + if m.MinDelegation != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.MinDelegation)) + i-- + dAtA[i] = 0x48 + } + if m.OperatingCost != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.OperatingCost)) + i-- + dAtA[i] = 0x40 + } + if m.UploadInterval != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.UploadInterval)) + i-- + dAtA[i] = 0x38 + } + if len(m.StartKey) > 0 { + i -= len(m.StartKey) + copy(dAtA[i:], m.StartKey) + i = encodeVarintTx(dAtA, i, uint64(len(m.StartKey))) + i-- + dAtA[i] = 0x32 + } + if len(m.Config) > 0 { + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintTx(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x2a + } + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintTx(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintTx(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTx(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreatePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreatePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreatePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdatePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdatePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdatePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTx(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x1a + } + if m.Id != 0 { + i = 
encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdatePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdatePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdatePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDisablePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDisablePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDisablePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDisablePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDisablePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDisablePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgEnablePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgEnablePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgEnablePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgEnablePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgEnablePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgEnablePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgScheduleRuntimeUpgrade) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgScheduleRuntimeUpgrade) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgScheduleRuntimeUpgrade) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Binaries) > 0 { + i -= len(m.Binaries) + copy(dAtA[i:], m.Binaries) + i = encodeVarintTx(dAtA, i, uint64(len(m.Binaries))) + i-- + dAtA[i] = 0x32 + } + if m.Duration != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x28 + } + if m.ScheduledAt != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.ScheduledAt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTx(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintTx(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgScheduleRuntimeUpgradeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgScheduleRuntimeUpgradeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgScheduleRuntimeUpgradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCancelRuntimeUpgrade) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCancelRuntimeUpgrade) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCancelRuntimeUpgrade) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintTx(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCancelRuntimeUpgradeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCancelRuntimeUpgradeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCancelRuntimeUpgradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgFundPool) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgFundPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDefundPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgDefundPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCreatePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Config) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.StartKey) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.UploadInterval != 0 { + n += 1 + sovTx(uint64(m.UploadInterval)) + } + if m.OperatingCost != 0 { + n += 1 + sovTx(uint64(m.OperatingCost)) + } + if m.MinDelegation != 0 { + n += 1 + sovTx(uint64(m.MinDelegation)) + } + if m.MaxBundleSize != 0 { + n += 1 + sovTx(uint64(m.MaxBundleSize)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Binaries) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.StorageProviderId != 0 { + n += 1 + sovTx(uint64(m.StorageProviderId)) + } + if m.CompressionId != 0 { + n += 1 + sovTx(uint64(m.CompressionId)) + } + return n +} + +func (m *MsgCreatePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdatePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdatePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDisablePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + return n +} + +func (m *MsgDisablePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgEnablePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + return n +} + +func (m *MsgEnablePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgScheduleRuntimeUpgrade) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.ScheduledAt != 0 { + n += 1 + sovTx(uint64(m.ScheduledAt)) + } + if m.Duration != 0 { + n += 1 + sovTx(uint64(m.Duration)) + } + l = 
len(m.Binaries) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgScheduleRuntimeUpgradeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCancelRuntimeUpgrade) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgCancelRuntimeUpgradeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgFundPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgFundPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgFundPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgFundPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgFundPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgFundPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDefundPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDefundPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDefundPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDefundPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDefundPoolResponse: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDefundPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreatePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreatePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreatePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadInterval", wireType) + } + m.UploadInterval = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadInterval |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingCost", wireType) + } + m.OperatingCost = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OperatingCost |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDelegation", wireType) + } + m.MinDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBundleSize", wireType) + } + m.MaxBundleSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBundleSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binaries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Binaries = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CompressionId", wireType) + } + m.CompressionId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CompressionId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreatePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreatePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreatePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdatePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdatePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdatePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdatePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdatePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdatePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDisablePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDisablePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDisablePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDisablePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDisablePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDisablePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgEnablePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgEnablePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgEnablePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgEnablePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgEnablePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgEnablePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgScheduleRuntimeUpgrade) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgScheduleRuntimeUpgrade: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgScheduleRuntimeUpgrade: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } 
+ postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduledAt", wireType) + } + m.ScheduledAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ScheduledAt |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binaries", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Binaries = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgScheduleRuntimeUpgradeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgScheduleRuntimeUpgradeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgScheduleRuntimeUpgradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCancelRuntimeUpgrade) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCancelRuntimeUpgrade: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCancelRuntimeUpgrade: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCancelRuntimeUpgradeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCancelRuntimeUpgradeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCancelRuntimeUpgradeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/pool/types/types.go b/x/pool/types/types.go new file mode 100644 index 00000000..ab1254f4 --- /dev/null +++ b/x/pool/types/types.go @@ -0,0 +1 @@ +package types diff --git a/x/query/client/cli/query.go b/x/query/client/cli/query.go new file mode 100644 index 00000000..e88a40c4 --- /dev/null +++ b/x/query/client/cli/query.go @@ -0,0 +1,56 @@ +package cli + +import ( + "fmt" + // "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + // "github.com/cosmos/cosmos-sdk/client/flags" + // sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/KYVENetwork/chain/x/query/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(queryRoute string) *cobra.Command { + // Group query queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + // Account + cmd.AddCommand(CmdAccountAssets()) + cmd.AddCommand(CmdAccountFundedList()) + cmd.AddCommand(CmdAccountDelegationUnbondings()) + cmd.AddCommand(CmdAccountRedelegation()) + + // Pool + cmd.AddCommand(CmdShowPool()) + cmd.AddCommand(CmdListPool()) 
+ + // Staking + cmd.AddCommand(CmdShowStaker()) + cmd.AddCommand(CmdListStakers()) + cmd.AddCommand(CmdListStakersByPool()) + + // DELEGATION + cmd.AddCommand(CmdDelegator()) + cmd.AddCommand(CmdStakersByPoolAndDelegator()) + cmd.AddCommand(CmdDelegatorsByPoolAndStaker()) + + // Bundles + cmd.AddCommand(CmdShowFinalizedBundle()) + cmd.AddCommand(CmdListFinalizedBundles()) + cmd.AddCommand(CmdCanPropose()) + cmd.AddCommand(CmdCanVote()) + cmd.AddCommand(CmdCurrentVoteStatus()) + cmd.AddCommand(CmdCanValidate()) + + return cmd +} diff --git a/x/query/client/cli/query_account_assets.go b/x/query/client/cli/query_account_assets.go new file mode 100644 index 00000000..81f58c86 --- /dev/null +++ b/x/query/client/cli/query_account_assets.go @@ -0,0 +1,41 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdAccountAssets() *cobra.Command { + cmd := &cobra.Command{ + Use: "account-assets [address]", + Short: "Query account assets", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqAddress := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryAccountClient(clientCtx) + + params := &types.QueryAccountAssetsRequest{ + Address: reqAddress, + } + + res, err := queryClient.AccountAssets(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_account_delegation_unbondings.go b/x/query/client/cli/query_account_delegation_unbondings.go new file mode 100644 index 00000000..9f36b386 --- /dev/null +++ b/x/query/client/cli/query_account_delegation_unbondings.go @@ -0,0 +1,48 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdAccountDelegationUnbondings() *cobra.Command { + cmd := &cobra.Command{ + Use: "account-delegation-unbondings [address]", + Short: "Query all delegation unbondings of the given address", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqAddress := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryAccountClient(clientCtx) + + params := &types.QueryAccountDelegationUnbondingsRequest{ + Address: reqAddress, + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + params.Pagination = pageReq + + res, err := queryClient.AccountDelegationUnbondings(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_account_funded.go b/x/query/client/cli/query_account_funded.go new file mode 100644 index 00000000..f12272f2 --- /dev/null +++ b/x/query/client/cli/query_account_funded.go @@ -0,0 +1,41 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdAccountFundedList() *cobra.Command { + cmd := &cobra.Command{ + Use: "account-funded-list [address]", + Short: 
"Query all pools the given address is currently funding", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqAddress := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryAccountClient(clientCtx) + + params := &types.QueryAccountFundedListRequest{ + Address: reqAddress, + } + + res, err := queryClient.AccountFundedList(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_account_redelegation.go b/x/query/client/cli/query_account_redelegation.go new file mode 100644 index 00000000..24db4c1f --- /dev/null +++ b/x/query/client/cli/query_account_redelegation.go @@ -0,0 +1,41 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdAccountRedelegation() *cobra.Command { + cmd := &cobra.Command{ + Use: "account-redelegation [address]", + Short: "Query account-redelegation cooldown entries", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqAddress := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryAccountClient(clientCtx) + + params := &types.QueryAccountRedelegationRequest{ + Address: reqAddress, + } + + res, err := queryClient.AccountRedelegation(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_can_propose.go b/x/query/client/cli/query_can_propose.go new file mode 100644 index 00000000..252a8bfb --- /dev/null +++ b/x/query/client/cli/query_can_propose.go @@ -0,0 +1,56 @@ +package cli + +import ( + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdCanPropose() *cobra.Command { + cmd := &cobra.Command{ + Use: "can-propose [pool-id] [proposer] [from-height]", + Short: "Query if node can propose next bundle", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + reqProposer := args[1] + reqFromIndex, err := cast.ToUint64E(args[2]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryBundlesClient(clientCtx) + + params := &types.QueryCanProposeRequest{ + PoolId: reqId, + Proposer: reqProposer, + FromIndex: reqFromIndex, + } + + res, err := queryClient.CanPropose(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_can_validate.go b/x/query/client/cli/query_can_validate.go new file mode 100644 index 00000000..b2f42171 --- /dev/null +++ b/x/query/client/cli/query_can_validate.go @@ -0,0 +1,50 @@ +package cli + +import ( + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cast" 
+	"github.com/spf13/cobra"
+)
+
+var _ = strconv.Itoa(0)
+
+func CmdCanValidate() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "can-validate [pool_id] [valaddress]",
+		Short: "Query if current valaddress can validate in pool",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			reqId, err := cast.ToUint64E(args[0])
+			if err != nil {
+				return err
+			}
+
+			clientCtx, err := client.GetClientQueryContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			queryClient := types.NewQueryBundlesClient(clientCtx)
+
+			params := &types.QueryCanValidateRequest{
+				PoolId:     reqId,
+				Valaddress: args[1],
+			}
+
+			res, err := queryClient.CanValidate(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/query/client/cli/query_can_vote.go b/x/query/client/cli/query_can_vote.go
new file mode 100644
index 00000000..37de6090
--- /dev/null
+++ b/x/query/client/cli/query_can_vote.go
@@ -0,0 +1,53 @@
+package cli
+
+import (
+	"strconv"
+
+	"github.com/KYVENetwork/chain/x/query/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	"github.com/spf13/cast"
+	"github.com/spf13/cobra"
+)
+
+var _ = strconv.Itoa(0)
+
+func CmdCanVote() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "can-vote [pool_id] [storage-id] [voter]",
+		Short: "Query if the current voter can vote on the current proposal",
+		Args:  cobra.ExactArgs(3),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			reqId, err := cast.ToUint64E(args[0])
+			if err != nil {
+				return err
+			}
+			reqStorageId := args[1]
+			reqVoter := args[2]
+
+			clientCtx, err := client.GetClientQueryContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			queryClient := types.NewQueryBundlesClient(clientCtx)
+
+			params := &types.QueryCanVoteRequest{
+				PoolId:    reqId,
+				StorageId: reqStorageId,
+				Voter:     reqVoter,
+			}
+
+			res, err := queryClient.CanVote(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/query/client/cli/query_current_vote_status.go b/x/query/client/cli/query_current_vote_status.go
new file mode 100644
index 00000000..67751a6b
--- /dev/null
+++ b/x/query/client/cli/query_current_vote_status.go
@@ -0,0 +1,49 @@
+package cli
+
+import (
+	"strconv"
+
+	"github.com/KYVENetwork/chain/x/query/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	"github.com/spf13/cast"
+	"github.com/spf13/cobra"
+)
+
+var _ = strconv.Itoa(0)
+
+func CmdCurrentVoteStatus() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "current-vote-status [pool_id]",
+		Short: "Query current vote tally of pool",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			reqId, err := cast.ToUint64E(args[0])
+			if err != nil {
+				return err
+			}
+
+			clientCtx, err := client.GetClientQueryContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			queryClient := types.NewQueryBundlesClient(clientCtx)
+
+			params := &types.QueryCurrentVoteStatusRequest{
+				PoolId: reqId,
+			}
+
+			res, err := queryClient.CurrentVoteStatus(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/query/client/cli/query_delegation_delegator.go b/x/query/client/cli/query_delegation_delegator.go
new file mode 100644
index 00000000..1c144d15
---
/dev/null +++ b/x/query/client/cli/query_delegation_delegator.go @@ -0,0 +1,47 @@ +package cli + +import ( + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdDelegator() *cobra.Command { + cmd := &cobra.Command{ + Use: "delegator [staker] [delegator]", + Short: "Query delegator of staker", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqStaker := args[0] + reqDelegator := args[1] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryDelegationClient(clientCtx) + + params := &types.QueryDelegatorRequest{ + Staker: reqStaker, + Delegator: reqDelegator, + } + + res, err := queryClient.Delegator(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_delegation_delegators_by_staker.go b/x/query/client/cli/query_delegation_delegators_by_staker.go new file mode 100644 index 00000000..ce6d00ed --- /dev/null +++ b/x/query/client/cli/query_delegation_delegators_by_staker.go @@ -0,0 +1,52 @@ +package cli + +import ( + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdDelegatorsByPoolAndStaker() *cobra.Command { + cmd := &cobra.Command{ + Use: "delegators-by-staker [staker]", + Short: "Query all delegators for given staker", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqStaker := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryDelegationClient(clientCtx) + + params := &types.QueryDelegatorsByStakerRequest{ + Staker: reqStaker, + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + params.Pagination = pageReq + + res, err := queryClient.DelegatorsByStaker(cmd.Context(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_delegation_stakers_by_delegator.go b/x/query/client/cli/query_delegation_stakers_by_delegator.go new file mode 100644 index 00000000..d6b37a19 --- /dev/null +++ b/x/query/client/cli/query_delegation_stakers_by_delegator.go @@ -0,0 +1,52 @@ +package cli + +import ( + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +var _ = strconv.Itoa(0) + +func CmdStakersByPoolAndDelegator() *cobra.Command { + cmd := &cobra.Command{ + Use: "stakers-by-delegator [delegator]", + Short: "Query all stakers a user has delegated to", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + reqDelegator := args[0] + + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryDelegationClient(clientCtx) + + params := &types.QueryStakersByDelegatorRequest{ + Delegator: reqDelegator, + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } 
+			params.Pagination = pageReq
+
+			res, err := queryClient.StakersByDelegator(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddPaginationFlagsToCmd(cmd, cmd.Use)
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/query/client/cli/query_finalized_bundles.go b/x/query/client/cli/query_finalized_bundles.go
new file mode 100644
index 00000000..1e3e2688
--- /dev/null
+++ b/x/query/client/cli/query_finalized_bundles.go
@@ -0,0 +1,94 @@
+package cli
+
+import (
+	"context"
+
+	"github.com/spf13/cast"
+
+	"github.com/KYVENetwork/chain/x/query/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	"github.com/spf13/cobra"
+)
+
+func CmdListFinalizedBundles() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "finalized-bundles [pool_id]",
+		Short: "list all finalized bundles of pool given by pool_id",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			poolId, err := cast.ToUint64E(args[0])
+			if err != nil {
+				return err
+			}
+
+			clientCtx, err := client.GetClientQueryContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			pageReq, err := client.ReadPageRequest(cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			queryClient := types.NewQueryBundlesClient(clientCtx)
+
+			params := &types.QueryFinalizedBundlesRequest{
+				PoolId:     poolId,
+				Pagination: pageReq,
+			}
+
+			res, err := queryClient.FinalizedBundles(context.Background(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddPaginationFlagsToCmd(cmd, cmd.Use)
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+func CmdShowFinalizedBundle() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "finalized-bundle [pool_id] [bundle-id]",
+		Short: "show the finalized bundle given by pool_id and bundle_id",
+		Args:  cobra.ExactArgs(2),
+		RunE: func(cmd *cobra.Command, args []string) (err error) {
+			poolId, err := cast.ToUint64E(args[0])
+			if err != nil {
+				return err
+			}
+
+			bundleId, err := cast.ToUint64E(args[1])
+			if err != nil {
+				return err
+			}
+
+			clientCtx := client.GetClientContextFromCmd(cmd)
+
+			queryClient := types.NewQueryBundlesClient(clientCtx)
+
+			params := &types.QueryFinalizedBundleRequest{
+				PoolId: poolId,
+				Id:     bundleId,
+			}
+
+			res, err := queryClient.FinalizedBundle(context.Background(), params)
+			if err != nil {
+				return err
+			}
+
+			return clientCtx.PrintProto(res)
+		},
+	}
+
+	flags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
diff --git a/x/query/client/cli/query_pool.go b/x/query/client/cli/query_pool.go
new file mode 100644
index 00000000..21fd582b
--- /dev/null
+++ b/x/query/client/cli/query_pool.go
@@ -0,0 +1,96 @@
+package cli
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/KYVENetwork/chain/x/query/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/client/flags"
+	"github.com/spf13/cast"
+	"github.com/spf13/cobra"
+)
+
+func CmdListPool() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "pools [search] [runtime] [disabled]",
+		Short: "list all pools",
+		Args:  cobra.ExactArgs(3),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			reqSearch := args[0]
+			reqRuntime := args[1]
+
+			reqDisabled, err := cast.ToBoolE(args[2])
+			if err != nil {
+				return err
+			}
+
+			clientCtx, err := client.GetClientQueryContext(cmd)
+			if err != nil {
+				return err
+			}
+
+			pageReq, err := client.ReadPageRequest(cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			queryClient := types.NewQueryPoolClient(clientCtx)
+
+			params := &types.QueryPoolsRequest{
+
Pagination: pageReq, + Search: reqSearch, + Runtime: reqRuntime, + Disabled: reqDisabled, + } + + res, err := queryClient.Pools(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "pool [id]", + Short: "shows a pool", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryPoolClient(clientCtx) + + id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + + params := &types.QueryPoolRequest{ + Id: id, + } + + res, err := queryClient.Pool(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_staker.go b/x/query/client/cli/query_staker.go new file mode 100644 index 00000000..6af92541 --- /dev/null +++ b/x/query/client/cli/query_staker.go @@ -0,0 +1,78 @@ +package cli + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdListStakers() *cobra.Command { + cmd := &cobra.Command{ + Use: "stakers", + Short: "list all stakers", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + queryClient := types.NewQueryStakersClient(clientCtx) + + params := &types.QueryStakersRequest{ + Pagination: pageReq, + } + + res, err := queryClient.Stakers(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddPaginationFlagsToCmd(cmd, cmd.Use) + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func CmdShowStaker() *cobra.Command { + cmd := &cobra.Command{ + Use: "staker [address]", + Short: "shows all necessary information for staker", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + queryClient := types.NewQueryStakersClient(clientCtx) + + params := &types.QueryStakerRequest{ + Address: args[0], + } + + res, err := queryClient.Staker(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/client/cli/query_stakers_by_pool.go b/x/query/client/cli/query_stakers_by_pool.go new file mode 100644 index 00000000..d6adea9f --- /dev/null +++ b/x/query/client/cli/query_stakers_by_pool.go @@ -0,0 +1,47 @@ +package cli + +import ( + "context" + "strconv" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdListStakersByPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "stakers-by-pool", + Short: "list all stakers", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + + 
id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + + queryClient := types.NewQueryStakersClient(clientCtx) + + params := &types.QueryStakersByPoolRequest{ + PoolId: id, + } + + res, err := queryClient.StakersByPool(context.Background(), params) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/query/keeper/grpc_account_asssets.go b/x/query/keeper/grpc_account_asssets.go new file mode 100644 index 00000000..b5c30b45 --- /dev/null +++ b/x/query/keeper/grpc_account_asssets.go @@ -0,0 +1,82 @@ +package keeper + +import ( + "context" + + globalTypes "github.com/KYVENetwork/chain/x/global/types" + + "github.com/KYVENetwork/chain/util" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// AccountAssets returns an overview of the balances of the given user regarding the protocol nodes +// This includes the current balance, funding, staking, and delegation. +func (k Keeper) AccountAssets(goCtx context.Context, req *types.QueryAccountAssetsRequest) (*types.QueryAccountAssetsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + response := types.QueryAccountAssetsResponse{} + + // ======= + // Balance + // ======= + account, _ := sdk.AccAddressFromBech32(req.Address) + balance := k.bankKeeper.GetBalance(ctx, account, globalTypes.Denom) + response.Balance = balance.Amount.Uint64() + + // ====================== + // ProtocolSelfDelegation + // ====================== + + response.ProtocolSelfDelegation = k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, req.Address, req.Address) + + // ================================================ + // ProtocolDelegation + ProtocolDelegationUnbonding + // ================================================ + + // Iterate all Delegator entries + delegatorStore := prefix.NewStore( + ctx.KVStore(k.delegationKeeper.StoreKey()), + util.GetByteKey(delegationtypes.DelegatorKeyPrefixIndex2, req.Address)) + delegatorIterator := sdk.KVStorePrefixIterator(delegatorStore, nil) + defer delegatorIterator.Close() + + for ; delegatorIterator.Valid(); delegatorIterator.Next() { + + staker := string(delegatorIterator.Key()[0:43]) + + response.ProtocolDelegation += k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, staker, req.Address) + response.ProtocolRewards += k.delegationKeeper.GetOutstandingRewards(ctx, staker, req.Address) + } + + // ====================================================== + // Delegation Unbonding + ProtocolSelfDelegationUnbonding + // ====================================================== + + // Iterate all UnbondingDelegation entries to get total delegation unbonding amount + for _, entry := range k.delegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(ctx, req.Address) { + response.ProtocolDelegationUnbonding += entry.Amount + if entry.Staker == req.Address { + response.ProtocolSelfDelegationUnbonding += entry.Amount + } + } + + // =============== + // ProtocolFunding + // =============== + + // Iterate all pools and look if the user is funding + for _, pool := range k.poolKeeper.GetAllPools(ctx) { + response.ProtocolFunding += pool.GetFunderAmount(req.Address) + } + + return &response, nil +} 
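As a usage reference (not part of this patch): a minimal sketch of how a client could consume the AccountAssets endpoint defined above over gRPC. It assumes a node serving gRPC on localhost:9090 and uses a placeholder bech32 address; the fields printed are the ones populated in the keeper code above.

// Sketch only; assumptions: plaintext gRPC on localhost:9090, placeholder address.
package main

import (
	"context"
	"fmt"

	querytypes "github.com/KYVENetwork/chain/x/query/types"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial the node's gRPC endpoint (no TLS, as on a local devnet).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The generated query client accepts the gRPC connection directly.
	client := querytypes.NewQueryAccountClient(conn)

	// "kyve1..." is a placeholder; substitute a real account address.
	res, err := client.AccountAssets(context.Background(), &querytypes.QueryAccountAssetsRequest{
		Address: "kyve1...",
	})
	if err != nil {
		panic(err)
	}

	// Fields populated by the AccountAssets keeper method above.
	fmt.Println(res.Balance, res.ProtocolSelfDelegation, res.ProtocolDelegation, res.ProtocolFunding)
}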
diff --git a/x/query/keeper/grpc_account_delegation_unbondings.go b/x/query/keeper/grpc_account_delegation_unbondings.go new file mode 100644 index 00000000..e672d53b --- /dev/null +++ b/x/query/keeper/grpc_account_delegation_unbondings.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "context" + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) AccountDelegationUnbondings(goCtx context.Context, req *types.QueryAccountDelegationUnbondingsRequest) (*types.QueryAccountDelegationUnbondingsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + var delegationUnbondings []types.DelegationUnbonding + + store := prefix.NewStore(ctx.KVStore(k.delegationKeeper.StoreKey()), util.GetByteKey(delegationtypes.UndelegationQueueKeyPrefixIndex2, req.Address)) + pageRes, err := query.FilteredPaginate(store, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + if accumulate { + index := binary.BigEndian.Uint64(key[0:8]) + unbondingEntry, _ := k.delegationKeeper.GetUndelegationQueueEntry(ctx, index) + + delegationUnbondings = append(delegationUnbondings, types.DelegationUnbonding{ + Amount: unbondingEntry.Amount, + CreationTime: unbondingEntry.CreationTime, + Staker: k.GetFullStaker(ctx, unbondingEntry.Staker), + }) + } + return true, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryAccountDelegationUnbondingsResponse{ + Unbondings: delegationUnbondings, + Pagination: pageRes, + }, nil +} diff --git a/x/query/keeper/grpc_account_funded.go b/x/query/keeper/grpc_account_funded.go new file mode 100644 index 00000000..f5355a6e --- /dev/null +++ b/x/query/keeper/grpc_account_funded.go @@ -0,0 +1,40 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) AccountFundedList(goCtx context.Context, req *types.QueryAccountFundedListRequest) (*types.QueryAccountFundedListResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(goCtx) + var funded []types.Funded + + for _, pool := range k.poolKeeper.GetAllPools(ctx) { + funded = append(funded, types.Funded{ + Amount: pool.GetFunderAmount(req.Address), + Pool: &types.BasicPool{ + Id: pool.Id, + Name: pool.Name, + Runtime: pool.Runtime, + Logo: pool.Logo, + OperatingCost: pool.OperatingCost, + UploadInterval: pool.UploadInterval, + TotalFunds: pool.TotalFunds, + TotalDelegation: k.delegationKeeper.GetDelegationOfPool(ctx, pool.Id), + Status: k.GetPoolStatus(ctx, &pool), + }, + }) + } + + return &types.QueryAccountFundedListResponse{ + Funded: funded, + }, nil +} diff --git a/x/query/keeper/grpc_account_redelegation.go b/x/query/keeper/grpc_account_redelegation.go new file mode 100644 index 00000000..d6bac7d7 --- /dev/null +++ b/x/query/keeper/grpc_account_redelegation.go @@ -0,0 +1,38 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk 
"github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) AccountRedelegation(goCtx context.Context, req *types.QueryAccountRedelegationRequest) (*types.QueryAccountRedelegationResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + + var redelegationEntries []types.RedelegationEntry + usedSlots := uint64(0) + + for _, creationDate := range k.delegationKeeper.GetRedelegationCooldownEntries(ctx, req.Address) { + + finishDate := creationDate + k.delegationKeeper.GetRedelegationCooldown(ctx) + + if finishDate >= uint64(ctx.BlockTime().Unix()) { + redelegationEntries = append(redelegationEntries, types.RedelegationEntry{ + CreationDate: creationDate, + FinishDate: finishDate, + }) + usedSlots += 1 + } + } + + return &types.QueryAccountRedelegationResponse{ + RedelegationCooldownEntries: redelegationEntries, + AvailableSlots: k.delegationKeeper.GetRedelegationMaxAmount(ctx) - usedSlots, + }, nil +} diff --git a/x/query/keeper/grpc_account_redelegation_test.go b/x/query/keeper/grpc_account_redelegation_test.go new file mode 100644 index 00000000..adf0a9fe --- /dev/null +++ b/x/query/keeper/grpc_account_redelegation_test.go @@ -0,0 +1,149 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + querytypes "github.com/KYVENetwork/chain/x/query/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - grpc_account_redelegation.go + +* Call can validate if pool does not exist + +*/ + +var _ = Describe("grpc_account_redelegation.go", Ordered, func() { + s := i.NewCleanChain() + + redelegationCooldown := s.App().DelegationKeeper.GetRedelegationCooldown(s.Ctx()) + redelegationMaxAmount := s.App().DelegationKeeper.GetRedelegationMaxAmount(s.Ctx()) + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + MinDelegation: 200 * i.KYVE, + UploadInterval: 60, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{}, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "DisabledPool", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + Disabled: true, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Single redelegation", func() { + // ACT + s.RunTxDelegatorSuccess(&delegationtypes.MsgRedelegate{ + Creator: i.ALICE, + FromStaker: i.STAKER_0, + ToStaker: i.STAKER_1, + Amount: 10 * i.KYVE, + }) + + 
// ASSERT + res, err := s.App().QueryKeeper.AccountRedelegation(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryAccountRedelegationRequest{Address: i.ALICE}) + Expect(err).To(BeNil()) + + Expect(res.AvailableSlots).To(Equal(uint64(4))) + Expect(res.RedelegationCooldownEntries).To(HaveLen(1)) + Expect(res.RedelegationCooldownEntries[0].CreationDate).To(Equal(uint64(s.Ctx().BlockTime().Unix()))) + Expect(res.RedelegationCooldownEntries[0].FinishDate).To(Equal(redelegationCooldown + uint64(s.Ctx().BlockTime().Unix()))) + }) + + It("Await single redelegation", func() { + // ACT + s.RunTxDelegatorSuccess(&delegationtypes.MsgRedelegate{ + Creator: i.ALICE, + FromStaker: i.STAKER_0, + ToStaker: i.STAKER_1, + Amount: 10 * i.KYVE, + }) + s.CommitAfterSeconds(redelegationCooldown + 1) + + // Assert + + res, err := s.App().QueryKeeper.AccountRedelegation(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryAccountRedelegationRequest{Address: i.ALICE}) + Expect(err).To(BeNil()) + + Expect(res.AvailableSlots).To(Equal(uint64(5))) + Expect(res.RedelegationCooldownEntries).To(HaveLen(0)) + }) + + It("Exhaust all redelegation", func() { + // Arrange + redelegationMsg := &delegationtypes.MsgRedelegate{ + Creator: i.ALICE, + FromStaker: i.STAKER_0, + ToStaker: i.STAKER_1, + Amount: 10 * i.KYVE, + } + + // ACT + for i := uint64(0); i < redelegationMaxAmount; i++ { + s.RunTxDelegatorSuccess(redelegationMsg) + s.CommitAfterSeconds(1) + } + // Assert + + res, err := s.App().QueryKeeper.AccountRedelegation(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryAccountRedelegationRequest{Address: i.ALICE}) + Expect(err).To(BeNil()) + + Expect(res.AvailableSlots).To(Equal(uint64(0))) + Expect(res.RedelegationCooldownEntries).To(HaveLen(5)) + + for i := uint64(0); i < redelegationMaxAmount; i++ { + Expect(res.RedelegationCooldownEntries[i].CreationDate).To(Equal(uint64(s.Ctx().BlockTime().Unix()) - redelegationMaxAmount + i)) + } + }) +}) diff --git a/x/query/keeper/grpc_current_vote_status.go b/x/query/keeper/grpc_current_vote_status.go new file mode 100644 index 00000000..b1db9e83 --- /dev/null +++ b/x/query/keeper/grpc_current_vote_status.go @@ -0,0 +1,33 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) CurrentVoteStatus(c context.Context, req *types.QueryCurrentVoteStatusRequest) (*types.QueryCurrentVoteStatusResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + _, found := k.poolKeeper.GetPool(ctx, req.PoolId) + if !found { + return nil, sdkErrors.ErrKeyNotFound + } + + voteDistribution := k.bundleKeeper.GetVoteDistribution(ctx, req.PoolId) + + return &types.QueryCurrentVoteStatusResponse{ + Valid: voteDistribution.Valid, + Invalid: voteDistribution.Invalid, + Abstain: voteDistribution.Abstain, + Total: voteDistribution.Total, + }, nil +} diff --git a/x/query/keeper/grpc_delegation_delegator.go b/x/query/keeper/grpc_delegation_delegator.go new file mode 100644 index 00000000..3af9f32a --- /dev/null +++ b/x/query/keeper/grpc_delegation_delegator.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// The Delegator query returns the 
outstanding rewards and the total delegation amount of a +// delegator for its staker. +// If the delegator is not a staker both amounts will be zero. +// The request does not error. +func (k Keeper) Delegator(goCtx context.Context, req *types.QueryDelegatorRequest) (*types.QueryDelegatorResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + + response := types.QueryDelegatorResponse{} + response.Delegator = &types.StakerDelegatorResponse{ + Delegator: req.Delegator, + CurrentReward: k.delegationKeeper.GetOutstandingRewards(ctx, req.Staker, req.Delegator), + DelegationAmount: k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, req.Staker, req.Delegator), + Staker: req.Staker, + } + + return &response, nil +} diff --git a/x/query/keeper/grpc_delegation_delegators_by_staker.go b/x/query/keeper/grpc_delegation_delegators_by_staker.go new file mode 100644 index 00000000..8891ea0b --- /dev/null +++ b/x/query/keeper/grpc_delegation_delegators_by_staker.go @@ -0,0 +1,56 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) DelegatorsByStaker(goCtx context.Context, req *types.QueryDelegatorsByStakerRequest) (*types.QueryDelegatorsByStakerResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + + var delegators []types.StakerDelegatorResponse + + store := ctx.KVStore(k.delegationKeeper.StoreKey()) + delegatorStore := prefix.NewStore(store, util.GetByteKey(delegationtypes.DelegatorKeyPrefix, req.Staker)) + + pageRes, err := query.FilteredPaginate(delegatorStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + if accumulate { + var delegator delegationtypes.Delegator + + if err := k.cdc.Unmarshal(value, &delegator); err != nil { + return false, nil + } + + delegators = append(delegators, types.StakerDelegatorResponse{ + Delegator: delegator.Delegator, + CurrentReward: k.delegationKeeper.GetOutstandingRewards(ctx, req.Staker, delegator.Delegator), + DelegationAmount: k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, req.Staker, delegator.Delegator), + Staker: req.Staker, + }) + } + return true, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + delegationData, _ := k.delegationKeeper.GetDelegationData(ctx, req.Staker) + + return &types.QueryDelegatorsByStakerResponse{ + Delegators: delegators, + TotalDelegation: delegationData.TotalDelegation, + TotalDelegatorCount: delegationData.DelegatorCount, + Pagination: pageRes, + }, nil +} diff --git a/x/query/keeper/grpc_delegation_stakers_by_delegator.go b/x/query/keeper/grpc_delegation_stakers_by_delegator.go new file mode 100644 index 00000000..6ecebaa6 --- /dev/null +++ b/x/query/keeper/grpc_delegation_stakers_by_delegator.go @@ -0,0 +1,49 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + 
"github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) StakersByDelegator(goCtx context.Context, req *types.QueryStakersByDelegatorRequest) (*types.QueryStakersByDelegatorResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(goCtx) + + var stakers []types.DelegationForStakerResponse + + delegatorStore := prefix.NewStore(ctx.KVStore(k.delegationKeeper.StoreKey()), util.GetByteKey(delegationtypes.DelegatorKeyPrefixIndex2, req.Delegator)) + + pageRes, err := query.FilteredPaginate(delegatorStore, req.Pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + if accumulate { + staker := string(key[0:43]) + + stakers = append(stakers, types.DelegationForStakerResponse{ + Staker: k.GetFullStaker(ctx, staker), + CurrentReward: k.delegationKeeper.GetOutstandingRewards(ctx, staker, req.Delegator), + DelegationAmount: k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, staker, req.Delegator), + }) + } + + return true, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return &types.QueryStakersByDelegatorResponse{ + Delegator: req.Delegator, + Stakers: stakers, + Pagination: pageRes, + }, nil +} diff --git a/x/query/keeper/grpc_params.go b/x/query/keeper/grpc_params.go new file mode 100644 index 00000000..cec84da6 --- /dev/null +++ b/x/query/keeper/grpc_params.go @@ -0,0 +1,36 @@ +package keeper + +import ( + "context" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Query + "github.com/KYVENetwork/chain/x/query/types" +) + +func (k Keeper) Params(goCtx context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + + bp := k.bundleKeeper.GetParams(ctx) + dp := k.delegationKeeper.GetParams(ctx) + globalParams := k.globalKeeper.GetParams(ctx) + govParams := govTypes.QueryParamsResponse{} + sp := k.stakerKeeper.GetParams(ctx) + + govVotingParams := k.govKeeper.GetVotingParams(ctx) + govParams.VotingParams = &govVotingParams + govDepositParams := k.govKeeper.GetDepositParams(ctx) + govParams.DepositParams = &govDepositParams + govTallyParams := k.govKeeper.GetTallyParams(ctx) + govParams.TallyParams = &govTallyParams + + return &types.QueryParamsResponse{BundlesParams: &bp, DelegationParams: &dp, GlobalParams: &globalParams, GovParams: &govParams, StakersParams: &sp}, nil +} diff --git a/x/query/keeper/grpc_query.go b/x/query/keeper/grpc_query.go new file mode 100644 index 00000000..efa8a9d1 --- /dev/null +++ b/x/query/keeper/grpc_query.go @@ -0,0 +1,14 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/query/types" +) + +var ( + _ types.QueryAccountServer = Keeper{} + _ types.QueryPoolServer = Keeper{} + _ types.QueryStakersServer = Keeper{} + _ types.QueryDelegationServer = Keeper{} + _ types.QueryBundlesServer = Keeper{} + _ types.QueryParamsServer = Keeper{} +) diff --git a/x/query/keeper/grpc_query_can_propose.go b/x/query/keeper/grpc_query_can_propose.go new file mode 100644 index 00000000..9cfc66de --- /dev/null +++ b/x/query/keeper/grpc_query_can_propose.go @@ -0,0 +1,30 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk 
"github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) CanPropose(c context.Context, req *types.QueryCanProposeRequest) (*types.QueryCanProposeResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + if err := k.bundleKeeper.AssertCanPropose(ctx, req.PoolId, req.Staker, req.Proposer, req.FromIndex); err != nil { + return &types.QueryCanProposeResponse{ + Possible: false, + Reason: err.Error(), + }, nil + } + + return &types.QueryCanProposeResponse{ + Possible: true, + Reason: "", + }, nil +} diff --git a/x/query/keeper/grpc_query_can_propose_test.go b/x/query/keeper/grpc_query_can_propose_test.go new file mode 100644 index 00000000..afb89589 --- /dev/null +++ b/x/query/keeper/grpc_query_can_propose_test.go @@ -0,0 +1,556 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + querytypes "github.com/KYVENetwork/chain/x/query/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - grpc_query_can_propose.go + +* Call can propose if pool does not exist +* Call can propose if pool is currently upgrading +* Call can propose if pool is disabled +* Call can propose if pool is out of funds +* Call can propose if pool has not reached the minimum stake +* Call can propose with a valaccount which does not exist +* Call can propose as a staker who is not the next uploader +* Call can propose before the upload interval passed +* Call can propose with an invalid from height +* Call can propose on an active pool as the next uploader with valid args + +*/ + +var _ = Describe("grpc_query_can_propose.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + MinDelegation: 200 * i.KYVE, + UploadInterval: 60, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{}, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 0, + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgVoteBundleProposal{ + Creator: 
i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + s.CommitAfterSeconds(60) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Call can propose if pool does not exist", func() { + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 1, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(sdkErrors.Wrapf(sdkErrors.ErrNotFound, pooltypes.ErrPoolNotFound.Error(), 1).Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 1, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose if pool is currently upgrading", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.UpgradePlan = &pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: 100, + Duration: 3600, + } + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(bundletypes.ErrPoolCurrentlyUpgrading.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose if pool is disabled", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.Disabled = true + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(bundletypes.ErrPoolDisabled.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose if pool is out of funds", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + 
Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(bundletypes.ErrPoolOutOfFunds.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose if pool has not reached the minimum stake", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgUndelegate{ + Creator: i.STAKER_0, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + // wait for unbonding + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx())) + s.CommitAfterSeconds(1) + + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(bundletypes.ErrMinDelegationNotReached.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose with a valaccount which does not exist", func() { + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_0, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(stakertypes.ErrValaccountUnauthorized.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose as a staker who is not the next uploader", func() { + // ARRANGE + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + var canPropose *querytypes.QueryCanProposeResponse + var err error + + // ACT + if bundleProposal.NextUploader == i.STAKER_0 { + canPropose, err = s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + } else { + canPropose, err = s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_0, + Proposer: i.VALADDRESS_0, + FromIndex: 100, + }) + } + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeFalse()) + + if bundleProposal.NextUploader == i.STAKER_0 { + Expect(canPropose.Reason).To(Equal(sdkErrors.Wrapf(bundletypes.ErrNotDesignatedUploader, "expected %v received %v", i.STAKER_0, i.STAKER_1).Error())) + } else { + 
Expect(canPropose.Reason).To(Equal(sdkErrors.Wrapf(bundletypes.ErrNotDesignatedUploader, "expected %v received %v", i.STAKER_1, i.STAKER_0).Error())) + } + + var txErr error + + if bundleProposal.NextUploader == i.STAKER_0 { + _, txErr = s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + } else { + _, txErr = s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + } + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose before the upload interval passed", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + // increase upload interval for upload timeout + pool.UploadInterval = 120 + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(canPropose.Possible).To(BeFalse()) + Expect(canPropose.Reason).To(Equal(sdkErrors.Wrapf(bundletypes.ErrUploadInterval, "expected %v < %v", s.Ctx().BlockTime().Unix(), bundleProposal.UpdatedAt+pool.UploadInterval).Error())) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canPropose.Reason)) + }) + + It("Call can propose with an invalid from index", func() { + // ACT + canPropose_1, err_1 := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 99, + }) + + canPropose_2, err_2 := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 101, + }) + + // ASSERT + Expect(err_1).To(BeNil()) + Expect(err_2).To(BeNil()) + + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + bundleProposal, _ := s.App().BundlesKeeper.GetBundleProposal(s.Ctx(), 0) + + Expect(canPropose_1.Possible).To(BeFalse()) + Expect(canPropose_1.Reason).To(Equal(sdkErrors.Wrapf(bundletypes.ErrFromIndex, "expected %v received %v", pool.CurrentIndex+bundleProposal.BundleSize, 99).Error())) + + Expect(canPropose_2.Possible).To(BeFalse()) + Expect(canPropose_2.Reason).To(Equal(sdkErrors.Wrapf(bundletypes.ErrFromIndex, "expected %v received %v", pool.CurrentIndex+bundleProposal.BundleSize, 101).Error())) + + _, txErr_1 := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 99, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + 
BundleSummary: "test_value", + }) + + Expect(txErr_1).NotTo(BeNil()) + Expect(txErr_1.Error()).To(Equal(canPropose_1.Reason)) + + _, txErr_2 := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 101, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr_2).NotTo(BeNil()) + Expect(txErr_2.Error()).To(Equal(canPropose_2.Reason)) + }) + + It("Call can propose on an active pool as the next uploader with valid args", func() { + // ACT + canPropose, err := s.App().QueryKeeper.CanPropose(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanProposeRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Proposer: i.VALADDRESS_1, + FromIndex: 100, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canPropose.Possible).To(BeTrue()) + Expect(canPropose.Reason).To(BeEmpty()) + + _, txErr := s.RunTx(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + DataSize: 100, + DataHash: "test_hash", + FromIndex: 100, + BundleSize: 100, + FromKey: "100", + ToKey: "199", + BundleSummary: "test_value", + }) + + Expect(txErr).To(BeNil()) + }) +}) diff --git a/x/query/keeper/grpc_query_can_validate.go b/x/query/keeper/grpc_query_can_validate.go new file mode 100644 index 00000000..9617ea48 --- /dev/null +++ b/x/query/keeper/grpc_query_can_validate.go @@ -0,0 +1,47 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) CanValidate(c context.Context, req *types.QueryCanValidateRequest) (*types.QueryCanValidateResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + if _, err := k.poolKeeper.GetPoolWithError(ctx, req.PoolId); err != nil { + return &types.QueryCanValidateResponse{ + Possible: false, + Reason: err.Error(), + }, nil + } + + var staker string + + // Check if valaddress has a valaccount in pool + for _, valaccount := range k.stakerKeeper.GetAllValaccountsOfPool(ctx, req.PoolId) { + if valaccount.Valaddress == req.Valaddress { + staker = valaccount.Staker + break + } + } + + if staker == "" { + return &types.QueryCanValidateResponse{ + Possible: false, + Reason: "no valaccount found", + }, nil + } + + return &types.QueryCanValidateResponse{ + Possible: true, + Reason: staker, + }, nil +} diff --git a/x/query/keeper/grpc_query_can_validate_test.go b/x/query/keeper/grpc_query_can_validate_test.go new file mode 100644 index 00000000..05c0b853 --- /dev/null +++ b/x/query/keeper/grpc_query_can_validate_test.go @@ -0,0 +1,133 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + querytypes "github.com/KYVENetwork/chain/x/query/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - grpc_query_can_validate.go + +* Call can validate if pool does not exist +* Call can validate if valaddress does not exist +* Call can validate with a valaddress which belongs to another pool +* Call can validate with a valid valaddress + +*/ + +var _ = Describe("grpc_query_can_validate.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + MinDelegation: 200 * i.KYVE, + UploadInterval: 60, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{}, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest2", + MinDelegation: 200 * i.KYVE, + UploadInterval: 60, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{}, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 1, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Call can validate if pool does not exist", func() { + // ACT + canValidate, err := s.App().QueryKeeper.CanValidate(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanValidateRequest{ + PoolId: 2, + Valaddress: i.VALADDRESS_0, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canValidate.Possible).To(BeFalse()) + Expect(canValidate.Reason).To(Equal(sdkErrors.Wrapf(sdkErrors.ErrNotFound, pooltypes.ErrPoolNotFound.Error(), 2).Error())) + }) + + It("Call can validate if valaddress does not exist", func() { + // ACT + canValidate, err := s.App().QueryKeeper.CanValidate(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanValidateRequest{ + PoolId: 0, + Valaddress: i.VALADDRESS_2, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canValidate.Possible).To(BeFalse()) + Expect(canValidate.Reason).To(Equal("no valaccount found")) + }) + + It("Call can validate with a valaddress which belongs to another pool", func() { + // ACT + canValidate, err := s.App().QueryKeeper.CanValidate(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanValidateRequest{ + PoolId: 0, + Valaddress: i.VALADDRESS_1, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canValidate.Possible).To(BeFalse()) + Expect(canValidate.Reason).To(Equal("no valaccount found")) + }) + + It("Call can validate with a valid valaddress", func() { + // ACT + canValidate, err := s.App().QueryKeeper.CanValidate(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanValidateRequest{ + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canValidate.Possible).To(BeTrue()) + Expect(canValidate.Reason).To(Equal(i.STAKER_0)) + }) +}) diff --git a/x/query/keeper/grpc_query_can_vote.go b/x/query/keeper/grpc_query_can_vote.go new file mode 100644 index 00000000..944f88c9 --- /dev/null +++ b/x/query/keeper/grpc_query_can_vote.go @@ -0,0 +1,41 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" 
+) + +func (k Keeper) CanVote(c context.Context, req *types.QueryCanVoteRequest) (*types.QueryCanVoteResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + if err := k.bundleKeeper.AssertCanVote(ctx, req.PoolId, req.Staker, req.Voter, req.StorageId); err != nil { + return &types.QueryCanVoteResponse{ + Possible: false, + Reason: err.Error(), + }, nil + } + + bundleProposal, _ := k.bundleKeeper.GetBundleProposal(ctx, req.PoolId) + hasVotedAbstain := util.ContainsString(bundleProposal.VotersAbstain, req.Staker) + + if hasVotedAbstain { + return &types.QueryCanVoteResponse{ + Possible: true, + Reason: "KYVE_VOTE_NO_ABSTAIN_ALLOWED", + }, nil + } + + return &types.QueryCanVoteResponse{ + Possible: true, + Reason: "", + }, nil +} diff --git a/x/query/keeper/grpc_query_can_vote_test.go b/x/query/keeper/grpc_query_can_vote_test.go new file mode 100644 index 00000000..922bf2a3 --- /dev/null +++ b/x/query/keeper/grpc_query_can_vote_test.go @@ -0,0 +1,500 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + bundletypes "github.com/KYVENetwork/chain/x/bundles/types" + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + querytypes "github.com/KYVENetwork/chain/x/query/types" + stakertypes "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - grpc_query_can_vote.go + +* Call can vote if pool does not exist +* Call can vote if pool is currently upgrading +* Call can vote if pool is disabled +* Call can vote if pool is out of funds +* Call can vote if pool has not reached the minimum stake +* Call can vote with a valaccount which does not exist +* Call can vote if current bundle was dropped +* Call can vote with a different storage id than the current one +* Call can vote if voter has already voted valid +* Call can vote if voter has already voted invalid +* Call can vote if voter has already voted abstain +* Call can vote on an active pool with a data bundle with valid args + +*/ + +var _ = Describe("grpc_query_can_vote.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + s = i.NewCleanChain() + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + MinDelegation: 200 * i.KYVE, + UploadInterval: 60, + MaxBundleSize: 100, + Protocol: &pooltypes.Protocol{}, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxPoolSuccess(&pooltypes.MsgFundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakertypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + s.RunTxBundlesSuccess(&bundletypes.MsgClaimUploaderRole{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + }) + + s.CommitAfterSeconds(60) + + s.RunTxBundlesSuccess(&bundletypes.MsgSubmitBundleProposal{ + Creator: i.VALADDRESS_0, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: 
"test_storage_id", + DataSize: 100, + DataHash: "test_hash", + BundleSize: 100, + FromKey: "0", + ToKey: "99", + BundleSummary: "test_value", + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Call can vote if pool does not exist", func() { + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 1, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(sdkErrors.Wrapf(sdkErrors.ErrNotFound, pooltypes.ErrPoolNotFound.Error(), 1).Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 1, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if pool is currently upgrading", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.UpgradePlan = &pooltypes.UpgradePlan{ + Version: "1.0.0", + Binaries: "{}", + ScheduledAt: 100, + Duration: 3600, + } + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrPoolCurrentlyUpgrading.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if pool is disabled", func() { + // ARRANGE + pool, _ := s.App().PoolKeeper.GetPool(s.Ctx(), 0) + pool.Disabled = true + + s.App().PoolKeeper.SetPool(s.Ctx(), pool) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrPoolDisabled.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if pool is out of funds", func() { + // ARRANGE + s.RunTxPoolSuccess(&pooltypes.MsgDefundPool{ + Creator: i.ALICE, + Id: 0, + Amount: 100 * i.KYVE, + }) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrPoolOutOfFunds.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + 
Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if pool has not reached the minimum stake", func() { + // ARRANGE + s.RunTxDelegatorSuccess(&delegationtypes.MsgUndelegate{ + Creator: i.STAKER_0, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + // wait for unbonding + s.CommitAfterSeconds(s.App().DelegationKeeper.GetUnbondingDelegationTime(s.Ctx())) + s.CommitAfterSeconds(1) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrMinDelegationNotReached.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote with a valaccount which does not exist", func() { + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_0, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(stakertypes.ErrValaccountUnauthorized.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_0, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if previous bundle was dropped", func() { + // ARRANGE + // wait for timeout so bundle gets dropped + s.CommitAfterSeconds(s.App().BundlesKeeper.GetUploadTimeout(s.Ctx())) + s.CommitAfterSeconds(1) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrBundleDropped.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote with a different storage id than the current one", func() { + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "another_test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrInvalidStorageId.Error())) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "another_test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if voter has already voted valid", func() { + // ARRANGE + _, txErr := 
s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).To(BeNil()) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrAlreadyVotedValid.Error())) + + _, txErr = s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if voter has already voted invalid", func() { + // ARRANGE + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + Expect(txErr).To(BeNil()) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeFalse()) + Expect(canVote.Reason).To(Equal(bundletypes.ErrAlreadyVotedInvalid.Error())) + + _, txErr = s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_INVALID, + }) + + Expect(txErr).NotTo(BeNil()) + Expect(txErr.Error()).To(Equal(canVote.Reason)) + }) + + It("Call can vote if voter has already voted abstain", func() { + // ARRANGE + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_ABSTAIN, + }) + + Expect(txErr).To(BeNil()) + + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeTrue()) + Expect(canVote.Reason).To(Equal("KYVE_VOTE_NO_ABSTAIN_ALLOWED")) + + _, txErr = s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).To(BeNil()) + }) + + It("Call can vote on an active pool with a data bundle with valid args", func() { + // ACT + canVote, err := s.App().QueryKeeper.CanVote(sdk.WrapSDKContext(s.Ctx()), &querytypes.QueryCanVoteRequest{ + PoolId: 0, + Staker: i.STAKER_1, + Voter: i.VALADDRESS_1, + StorageId: "test_storage_id", + }) + + // ASSERT + Expect(err).To(BeNil()) + + Expect(canVote.Possible).To(BeTrue()) + Expect(canVote.Reason).To(BeEmpty()) + + _, txErr := s.RunTx(&bundletypes.MsgVoteBundleProposal{ + Creator: i.VALADDRESS_1, + Staker: i.STAKER_1, + PoolId: 0, + StorageId: "test_storage_id", + Vote: bundletypes.VOTE_TYPE_VALID, + }) + + Expect(txErr).To(BeNil()) + }) +}) diff --git a/x/query/keeper/grpc_query_finalized_bundle.go b/x/query/keeper/grpc_query_finalized_bundle.go new file mode 100644 index 00000000..a5af464d --- 
/dev/null +++ b/x/query/keeper/grpc_query_finalized_bundle.go @@ -0,0 +1,53 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) FinalizedBundles(c context.Context, req *types.QueryFinalizedBundlesRequest) (*types.QueryFinalizedBundlesResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + finalizedBundles, pageRes, err := k.bundleKeeper.GetPaginatedFinalizedBundleQuery(ctx, req.Pagination, req.PoolId) + if err != nil { + return nil, err + } + + return &types.QueryFinalizedBundlesResponse{FinalizedBundles: finalizedBundles, Pagination: pageRes}, nil +} + +func (k Keeper) FinalizedBundle(c context.Context, req *types.QueryFinalizedBundleRequest) (*types.QueryFinalizedBundleResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + finalizedBundle, found := k.bundleKeeper.GetFinalizedBundle(ctx, req.PoolId, req.Id) + if !found { + return nil, sdkerrors.ErrKeyNotFound + } + + return &types.QueryFinalizedBundleResponse{FinalizedBundle: finalizedBundle}, nil +} + +func (k Keeper) FinalizedBundlesByHeight(goCtx context.Context, req *types.QueryFinalizedBundlesByHeightRequest) (*types.QueryFinalizedBundlesByHeightResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(goCtx) + finalizedBundle, found := k.bundleKeeper.GetFinalizedBundleByHeight(ctx, req.PoolId, req.Height) + if !found { + return nil, sdkerrors.ErrKeyNotFound + } + + return &types.QueryFinalizedBundlesByHeightResponse{FinalizedBundle: finalizedBundle}, nil +} diff --git a/x/query/keeper/grpc_query_pool.go b/x/query/keeper/grpc_query_pool.go new file mode 100644 index 00000000..b321c2f6 --- /dev/null +++ b/x/query/keeper/grpc_query_pool.go @@ -0,0 +1,67 @@ +package keeper + +import ( + "context" + + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Pools(c context.Context, req *types.QueryPoolsRequest) (*types.QueryPoolsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + pools, pageRes, err := k.poolKeeper.GetPaginatedPoolsQuery(ctx, req.Pagination, req.Search, req.Runtime, req.Disabled, req.StorageProviderId) + if err != nil { + return nil, err + } + + data := make([]types.PoolResponse, 0) + for i := range pools { + data = append(data, k.parsePoolResponse(ctx, &pools[i])) + } + + return &types.QueryPoolsResponse{Pools: data, Pagination: pageRes}, nil +} + +func (k Keeper) Pool(c context.Context, req *types.QueryPoolRequest) (*types.QueryPoolResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + pool, found := k.poolKeeper.GetPool(ctx, req.Id) + if !found { + return nil, sdkerrors.ErrKeyNotFound + } + + return &types.QueryPoolResponse{Pool: k.parsePoolResponse(ctx, &pool)}, nil +} + +func (k Keeper) parsePoolResponse(ctx 
sdk.Context, pool *pooltypes.Pool) types.PoolResponse { + bundleProposal, _ := k.bundleKeeper.GetBundleProposal(ctx, pool.Id) + stakers := k.stakerKeeper.GetAllStakerAddressesOfPool(ctx, pool.Id) + + totalSelfDelegation := uint64(0) + for _, address := range stakers { + totalSelfDelegation += k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, address, address) + } + + totalDelegation := k.delegationKeeper.GetDelegationOfPool(ctx, pool.Id) + + return types.PoolResponse{ + Id: pool.Id, + Data: pool, + BundleProposal: &bundleProposal, + Stakers: stakers, + TotalSelfDelegation: totalSelfDelegation, + TotalDelegation: totalDelegation, + Status: k.GetPoolStatus(ctx, pool), + } +} diff --git a/x/query/keeper/grpc_query_staker.go b/x/query/keeper/grpc_query_staker.go new file mode 100644 index 00000000..d2b03ad6 --- /dev/null +++ b/x/query/keeper/grpc_query_staker.go @@ -0,0 +1,72 @@ +package keeper + +import ( + "context" + "strings" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) Stakers(c context.Context, req *types.QueryStakersRequest) (*types.QueryStakersResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + data := make([]types.FullStaker, 0) + req.Search = strings.ToLower(req.Search) + + accumulator := func(address string, accumulate bool) bool { + fullStaker := k.GetFullStaker(ctx, address) + + searchAddress := strings.ToLower(fullStaker.Address) + searchMoniker := strings.ToLower(fullStaker.Metadata.Moniker) + + if strings.Contains(searchAddress, req.Search) || strings.Contains(searchMoniker, req.Search) { + if accumulate { + data = append(data, *fullStaker) + } + return true + } + + return false + } + + var pageRes *query.PageResponse + var err error + + switch req.Status { + case types.STAKER_STATUS_ACTIVE: + pageRes, err = k.delegationKeeper.GetPaginatedActiveStakersByDelegation(ctx, req.Pagination, accumulator) + case types.STAKER_STATUS_INACTIVE: + pageRes, err = k.delegationKeeper.GetPaginatedInactiveStakersByDelegation(ctx, req.Pagination, accumulator) + default: + pageRes, err = k.delegationKeeper.GetPaginatedStakersByDelegation(ctx, req.Pagination, accumulator) + } + + if err != nil { + return nil, err + } + + return &types.QueryStakersResponse{Stakers: data, Pagination: pageRes}, nil +} + +func (k Keeper) Staker(c context.Context, req *types.QueryStakerRequest) (*types.QueryStakerResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + if !k.stakerKeeper.DoesStakerExist(ctx, req.Address) { + return nil, sdkerrors.ErrKeyNotFound + } + + return &types.QueryStakerResponse{Staker: *k.GetFullStaker(ctx, req.Address)}, nil +} diff --git a/x/query/keeper/grpc_query_stakers_by_pool.go b/x/query/keeper/grpc_query_stakers_by_pool.go new file mode 100644 index 00000000..c2e3be80 --- /dev/null +++ b/x/query/keeper/grpc_query_stakers_by_pool.go @@ -0,0 +1,38 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) StakersByPool(c context.Context, req 
*types.QueryStakersByPoolRequest) (*types.QueryStakersByPoolResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + data := make([]types.StakerPoolResponse, 0) + + ctx := sdk.UnwrapSDKContext(c) + + _, found := k.poolKeeper.GetPool(ctx, req.PoolId) + if !found { + return nil, sdkerrors.ErrKeyNotFound + } + + valaccounts := k.stakerKeeper.GetAllValaccountsOfPool(ctx, req.PoolId) + for _, valaccount := range valaccounts { + if k.stakerKeeper.DoesStakerExist(ctx, valaccount.Staker) { + data = append(data, types.StakerPoolResponse{ + Staker: k.GetFullStaker(ctx, valaccount.Staker), + Valaccount: valaccount, + }) + } + } + + return &types.QueryStakersByPoolResponse{Stakers: data}, nil +} diff --git a/x/query/keeper/grpc_query_stakers_by_pool_count.go b/x/query/keeper/grpc_query_stakers_by_pool_count.go new file mode 100644 index 00000000..c052c621 --- /dev/null +++ b/x/query/keeper/grpc_query_stakers_by_pool_count.go @@ -0,0 +1,31 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) StakersByPoolCount(c context.Context, req *types.QueryStakersByPoolCountRequest) (*types.QueryStakersByPoolCountResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + result, pageRes, err := k.delegationKeeper.GetPaginatedActiveStakersByPoolCountAndDelegation(ctx, req.Pagination) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + data := make([]types.FullStaker, len(result)) + + for i := 0; i < len(result); i++ { + data[i] = *k.GetFullStaker(ctx, result[i]) + } + + return &types.QueryStakersByPoolCountResponse{Stakers: data, Pagination: pageRes}, nil +} diff --git a/x/query/keeper/helper.go b/x/query/keeper/helper.go new file mode 100644 index 00000000..6b9a9e40 --- /dev/null +++ b/x/query/keeper/helper.go @@ -0,0 +1,97 @@ +package keeper + +import ( + globalTypes "github.com/KYVENetwork/chain/x/global/types" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + "github.com/KYVENetwork/chain/x/query/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func (k Keeper) GetFullStaker(ctx sdk.Context, stakerAddress string) *types.FullStaker { + staker, _ := k.stakerKeeper.GetStaker(ctx, stakerAddress) + + commissionChange, found := k.stakerKeeper.GetCommissionChangeEntryByIndex2(ctx, staker.Address) + var commissionChangeEntry *types.CommissionChangeEntry = nil + if found { + commissionChangeEntry = &types.CommissionChangeEntry{ + Commission: commissionChange.Commission, + CreationDate: commissionChange.CreationDate, + } + } + + stakerMetadata := types.StakerMetadata{ + Commission: staker.Commission, + Moniker: staker.Moniker, + Website: staker.Website, + Logo: staker.Logo, + PendingCommissionChange: commissionChangeEntry, + } + + delegationData, _ := k.delegationKeeper.GetDelegationData(ctx, staker.Address) + + var poolMemberships []*types.PoolMembership + + for _, valaccount := range k.stakerKeeper.GetValaccountsFromStaker(ctx, staker.Address) { + + pool, _ := k.poolKeeper.GetPool(ctx, valaccount.PoolId) + + accountValaddress, _ := sdk.AccAddressFromBech32(valaccount.Valaddress) + balanceValaccount := k.bankKeeper.GetBalance(ctx, accountValaddress, globalTypes.Denom).Amount.Uint64() + + poolMemberships = append(poolMemberships, &types.PoolMembership{ + Pool: 
&types.BasicPool{ + Id: pool.Id, + Name: pool.Name, + Runtime: pool.Runtime, + Logo: pool.Logo, + OperatingCost: pool.OperatingCost, + UploadInterval: pool.UploadInterval, + TotalFunds: pool.TotalFunds, + TotalDelegation: k.delegationKeeper.GetDelegationOfPool(ctx, pool.Id), + Status: k.GetPoolStatus(ctx, &pool), + }, + Points: valaccount.Points, + IsLeaving: valaccount.IsLeaving, + Valaddress: valaccount.Valaddress, + Balance: balanceValaccount, + }) + } + + // Iterate all UnbondingDelegation entries to get total delegation unbonding amount + selfDelegationUnbonding := uint64(0) + for _, entry := range k.delegationKeeper.GetAllUnbondingDelegationQueueEntriesOfDelegator(ctx, stakerAddress) { + if entry.Staker == stakerAddress { + selfDelegationUnbonding += entry.Amount + } + } + + return &types.FullStaker{ + Address: staker.Address, + Metadata: &stakerMetadata, + SelfDelegation: k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, stakerAddress, stakerAddress), + SelfDelegationUnbonding: selfDelegationUnbonding, + TotalDelegation: k.delegationKeeper.GetDelegationAmount(ctx, staker.Address), + DelegatorCount: delegationData.DelegatorCount, + Pools: poolMemberships, + } +} + +func (k Keeper) GetPoolStatus(ctx sdk.Context, pool *pooltypes.Pool) pooltypes.PoolStatus { + totalDelegation := k.delegationKeeper.GetDelegationOfPool(ctx, pool.Id) + + var poolStatus pooltypes.PoolStatus + + if pool.UpgradePlan.ScheduledAt > 0 && uint64(ctx.BlockTime().Unix()) >= pool.UpgradePlan.ScheduledAt { + poolStatus = pooltypes.POOL_STATUS_UPGRADING + } else if pool.Disabled { + poolStatus = pooltypes.POOL_STATUS_DISABLED + } else if totalDelegation < pool.MinDelegation { + poolStatus = pooltypes.POOL_STATUS_NOT_ENOUGH_DELEGATION + } else if pool.TotalFunds == 0 { + poolStatus = pooltypes.POOL_STATUS_NO_FUNDS + } else { + poolStatus = pooltypes.POOL_STATUS_ACTIVE + } + + return poolStatus +} diff --git a/x/query/keeper/keeper.go b/x/query/keeper/keeper.go new file mode 100644 index 00000000..3facf8f2 --- /dev/null +++ b/x/query/keeper/keeper.go @@ -0,0 +1,85 @@ +package keeper + +import ( + "fmt" + + globalKeeper "github.com/KYVENetwork/chain/x/global/keeper" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + + bundlekeeper "github.com/KYVENetwork/chain/x/bundles/keeper" + delegationkeeper "github.com/KYVENetwork/chain/x/delegation/keeper" + poolkeeper "github.com/KYVENetwork/chain/x/pool/keeper" + stakerskeeper "github.com/KYVENetwork/chain/x/stakers/keeper" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + paramstore paramtypes.Subspace + + accountKeeper authkeeper.AccountKeeper + bankKeeper bankkeeper.Keeper + distrkeeper distrkeeper.Keeper + poolKeeper poolkeeper.Keeper + stakerKeeper stakerskeeper.Keeper + delegationKeeper delegationkeeper.Keeper + bundleKeeper bundlekeeper.Keeper + globalKeeper globalKeeper.Keeper + govKeeper govkeeper.Keeper + teamKeeper teamKeeper.Keeper + } +) + +func NewKeeper( + cdc 
codec.BinaryCodec, + storeKey, + memKey storetypes.StoreKey, + ps paramtypes.Subspace, + + accountKeeper authkeeper.AccountKeeper, + bankKeeper bankkeeper.Keeper, + distrkeeper distrkeeper.Keeper, + poolKeeper poolkeeper.Keeper, + stakerKeeper stakerskeeper.Keeper, + delegationKeeper delegationkeeper.Keeper, + bundleKeeper bundlekeeper.Keeper, + globalKeeper globalKeeper.Keeper, + govKeeper govkeeper.Keeper, + teamKeeper teamKeeper.Keeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + paramstore: ps, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + distrkeeper: distrkeeper, + poolKeeper: poolKeeper, + stakerKeeper: stakerKeeper, + delegationKeeper: delegationKeeper, + bundleKeeper: bundleKeeper, + globalKeeper: globalKeeper, + govKeeper: govKeeper, + teamKeeper: teamKeeper, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} diff --git a/x/query/keeper/keeper_suite_test.go b/x/query/keeper/keeper_suite_test.go new file mode 100644 index 00000000..3a9fc7d8 --- /dev/null +++ b/x/query/keeper/keeper_suite_test.go @@ -0,0 +1,13 @@ +package keeper_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Keeper Suite") +} diff --git a/x/query/module.go b/x/query/module.go new file mode 100644 index 00000000..df0d0bd5 --- /dev/null +++ b/x/query/module.go @@ -0,0 +1,152 @@ +package query + +import ( + "context" + "encoding/json" + // this line is used by starport scaffolding # 1 + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/KYVENetwork/chain/x/query/client/cli" + "github.com/KYVENetwork/chain/x/query/keeper" + "github.com/KYVENetwork/chain/x/query/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(_ *codec.LegacyAmino) {} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(_ cdctypes.InterfaceRegistry) {} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. 
The default GenesisState needs to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(_ codec.JSONCodec) json.RawMessage { + return nil +} + +// ValidateGenesis is used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(_ codec.JSONCodec, _ client.TxEncodingConfig, _ json.RawMessage) error { + return nil +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryAccountHandlerClient(context.Background(), mux, types.NewQueryAccountClient(clientCtx)) + _ = types.RegisterQueryPoolHandlerClient(context.Background(), mux, types.NewQueryPoolClient(clientCtx)) + _ = types.RegisterQueryStakersHandlerClient(context.Background(), mux, types.NewQueryStakersClient(clientCtx)) + _ = types.RegisterQueryDelegationHandlerClient(context.Background(), mux, types.NewQueryDelegationClient(clientCtx)) + _ = types.RegisterQueryBundlesHandlerClient(context.Background(), mux, types.NewQueryBundlesClient(clientCtx)) + _ = types.RegisterQueryParamsHandlerClient(context.Background(), mux, types.NewQueryParamsClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd returns the root query command for the module. The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryAccountServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryPoolServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryStakersServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryDelegationServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryBundlesServer(cfg.QueryServer(), am.keeper) + types.RegisterQueryParamsServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. 
If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(_ sdk.Context, _ codec.JSONCodec, _ json.RawMessage) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(_ sdk.Context, _ codec.JSONCodec) json.RawMessage { + return nil +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} diff --git a/x/query/types/account.pb.go b/x/query/types/account.pb.go new file mode 100644 index 00000000..369268b8 --- /dev/null +++ b/x/query/types/account.pb.go @@ -0,0 +1,2827 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/account.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryAccountAssetsRequest is the request type for the Query/AccountAssets RPC method. +type QueryAccountAssetsRequest struct { + // address ... 
+ Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryAccountAssetsRequest) Reset() { *m = QueryAccountAssetsRequest{} } +func (m *QueryAccountAssetsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAccountAssetsRequest) ProtoMessage() {} +func (*QueryAccountAssetsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{0} +} +func (m *QueryAccountAssetsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountAssetsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountAssetsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountAssetsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountAssetsRequest.Merge(m, src) +} +func (m *QueryAccountAssetsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountAssetsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountAssetsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountAssetsRequest proto.InternalMessageInfo + +func (m *QueryAccountAssetsRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// QueryAccountAssetsResponse is the response type for the Query/AccountAssets RPC method. +type QueryAccountAssetsResponse struct { + // balance ... + Balance uint64 `protobuf:"varint,1,opt,name=balance,proto3" json:"balance,omitempty"` + // protocol_staking ... + ProtocolSelfDelegation uint64 `protobuf:"varint,2,opt,name=protocol_self_delegation,json=protocolSelfDelegation,proto3" json:"protocol_self_delegation,omitempty"` + // protocol_staking_unbonding + ProtocolSelfDelegationUnbonding uint64 `protobuf:"varint,3,opt,name=protocol_self_delegation_unbonding,json=protocolSelfDelegationUnbonding,proto3" json:"protocol_self_delegation_unbonding,omitempty"` + // protocol_delegation ... + ProtocolDelegation uint64 `protobuf:"varint,4,opt,name=protocol_delegation,json=protocolDelegation,proto3" json:"protocol_delegation,omitempty"` + // protocol_delegation_unbonding + ProtocolDelegationUnbonding uint64 `protobuf:"varint,5,opt,name=protocol_delegation_unbonding,json=protocolDelegationUnbonding,proto3" json:"protocol_delegation_unbonding,omitempty"` + // protocol_rewards ... + ProtocolRewards uint64 `protobuf:"varint,6,opt,name=protocol_rewards,json=protocolRewards,proto3" json:"protocol_rewards,omitempty"` + // protocol_funding ... 
+ ProtocolFunding uint64 `protobuf:"varint,7,opt,name=protocol_funding,json=protocolFunding,proto3" json:"protocol_funding,omitempty"` +} + +func (m *QueryAccountAssetsResponse) Reset() { *m = QueryAccountAssetsResponse{} } +func (m *QueryAccountAssetsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAccountAssetsResponse) ProtoMessage() {} +func (*QueryAccountAssetsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{1} +} +func (m *QueryAccountAssetsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountAssetsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountAssetsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountAssetsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountAssetsResponse.Merge(m, src) +} +func (m *QueryAccountAssetsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountAssetsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountAssetsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountAssetsResponse proto.InternalMessageInfo + +func (m *QueryAccountAssetsResponse) GetBalance() uint64 { + if m != nil { + return m.Balance + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolSelfDelegation() uint64 { + if m != nil { + return m.ProtocolSelfDelegation + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolSelfDelegationUnbonding() uint64 { + if m != nil { + return m.ProtocolSelfDelegationUnbonding + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolDelegation() uint64 { + if m != nil { + return m.ProtocolDelegation + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolDelegationUnbonding() uint64 { + if m != nil { + return m.ProtocolDelegationUnbonding + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolRewards() uint64 { + if m != nil { + return m.ProtocolRewards + } + return 0 +} + +func (m *QueryAccountAssetsResponse) GetProtocolFunding() uint64 { + if m != nil { + return m.ProtocolFunding + } + return 0 +} + +// QueryAccountFundedListRequest ... +type QueryAccountDelegationUnbondingsRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // address ... 
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryAccountDelegationUnbondingsRequest) Reset() { + *m = QueryAccountDelegationUnbondingsRequest{} +} +func (m *QueryAccountDelegationUnbondingsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAccountDelegationUnbondingsRequest) ProtoMessage() {} +func (*QueryAccountDelegationUnbondingsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{2} +} +func (m *QueryAccountDelegationUnbondingsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountDelegationUnbondingsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountDelegationUnbondingsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountDelegationUnbondingsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountDelegationUnbondingsRequest.Merge(m, src) +} +func (m *QueryAccountDelegationUnbondingsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountDelegationUnbondingsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountDelegationUnbondingsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountDelegationUnbondingsRequest proto.InternalMessageInfo + +func (m *QueryAccountDelegationUnbondingsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryAccountDelegationUnbondingsRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// QueryAccountAssetsResponse is the response type for the Query/AccountAssets RPC method. +type QueryAccountDelegationUnbondingsResponse struct { + // balance ... + Unbondings []DelegationUnbonding `protobuf:"bytes,1,rep,name=unbondings,proto3" json:"unbondings"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAccountDelegationUnbondingsResponse) Reset() { + *m = QueryAccountDelegationUnbondingsResponse{} +} +func (m *QueryAccountDelegationUnbondingsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAccountDelegationUnbondingsResponse) ProtoMessage() {} +func (*QueryAccountDelegationUnbondingsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{3} +} +func (m *QueryAccountDelegationUnbondingsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountDelegationUnbondingsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountDelegationUnbondingsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountDelegationUnbondingsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountDelegationUnbondingsResponse.Merge(m, src) +} +func (m *QueryAccountDelegationUnbondingsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountDelegationUnbondingsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountDelegationUnbondingsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountDelegationUnbondingsResponse proto.InternalMessageInfo + +func (m *QueryAccountDelegationUnbondingsResponse) GetUnbondings() []DelegationUnbonding { + if m != nil { + return m.Unbondings + } + return nil +} + +func (m *QueryAccountDelegationUnbondingsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryAccountAssetsResponse is the response type for the Query/AccountAssets RPC method. 
+type DelegationUnbonding struct { + // amount + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + // creation_time + CreationTime uint64 `protobuf:"varint,2,opt,name=creation_time,json=creationTime,proto3" json:"creation_time,omitempty"` + // staker + Staker *FullStaker `protobuf:"bytes,3,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *DelegationUnbonding) Reset() { *m = DelegationUnbonding{} } +func (m *DelegationUnbonding) String() string { return proto.CompactTextString(m) } +func (*DelegationUnbonding) ProtoMessage() {} +func (*DelegationUnbonding) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{4} +} +func (m *DelegationUnbonding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DelegationUnbonding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DelegationUnbonding.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DelegationUnbonding) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegationUnbonding.Merge(m, src) +} +func (m *DelegationUnbonding) XXX_Size() int { + return m.Size() +} +func (m *DelegationUnbonding) XXX_DiscardUnknown() { + xxx_messageInfo_DelegationUnbonding.DiscardUnknown(m) +} + +var xxx_messageInfo_DelegationUnbonding proto.InternalMessageInfo + +func (m *DelegationUnbonding) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *DelegationUnbonding) GetCreationTime() uint64 { + if m != nil { + return m.CreationTime + } + return 0 +} + +func (m *DelegationUnbonding) GetStaker() *FullStaker { + if m != nil { + return m.Staker + } + return nil +} + +// QueryAccountFundedListRequest is the request type for the account queries with pagination +type QueryAccountFundedListRequest struct { + // address ... + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryAccountFundedListRequest) Reset() { *m = QueryAccountFundedListRequest{} } +func (m *QueryAccountFundedListRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAccountFundedListRequest) ProtoMessage() {} +func (*QueryAccountFundedListRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{5} +} +func (m *QueryAccountFundedListRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountFundedListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountFundedListRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountFundedListRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountFundedListRequest.Merge(m, src) +} +func (m *QueryAccountFundedListRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountFundedListRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountFundedListRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountFundedListRequest proto.InternalMessageInfo + +func (m *QueryAccountFundedListRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// QueryAccountFundedListResponse is the response type for the Query/AccountFundedList RPC method. 
+type QueryAccountFundedListResponse struct {
+	// funded ...
+	Funded []Funded `protobuf:"bytes,1,rep,name=funded,proto3" json:"funded"`
+}
+
+func (m *QueryAccountFundedListResponse) Reset() { *m = QueryAccountFundedListResponse{} }
+func (m *QueryAccountFundedListResponse) String() string { return proto.CompactTextString(m) }
+func (*QueryAccountFundedListResponse) ProtoMessage() {}
+func (*QueryAccountFundedListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_51ca316755261aec, []int{6}
+}
+func (m *QueryAccountFundedListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *QueryAccountFundedListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_QueryAccountFundedListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *QueryAccountFundedListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QueryAccountFundedListResponse.Merge(m, src)
+}
+func (m *QueryAccountFundedListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *QueryAccountFundedListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_QueryAccountFundedListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryAccountFundedListResponse proto.InternalMessageInfo
+
+func (m *QueryAccountFundedListResponse) GetFunded() []Funded {
+	if m != nil {
+		return m.Funded
+	}
+	return nil
+}
+
+// Funded ...
+type Funded struct {
+	// amount ...
+	Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"`
+	// pool ...
+	Pool *BasicPool `protobuf:"bytes,2,opt,name=pool,proto3" json:"pool,omitempty"`
+}
+
+func (m *Funded) Reset() { *m = Funded{} }
+func (m *Funded) String() string { return proto.CompactTextString(m) }
+func (*Funded) ProtoMessage() {}
+func (*Funded) Descriptor() ([]byte, []int) {
+	return fileDescriptor_51ca316755261aec, []int{7}
+}
+func (m *Funded) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Funded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Funded.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Funded) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Funded.Merge(m, src)
+}
+func (m *Funded) XXX_Size() int {
+	return m.Size()
+}
+func (m *Funded) XXX_DiscardUnknown() {
+	xxx_messageInfo_Funded.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Funded proto.InternalMessageInfo
+
+func (m *Funded) GetAmount() uint64 {
+	if m != nil {
+		return m.Amount
+	}
+	return 0
+}
+
+func (m *Funded) GetPool() *BasicPool {
+	if m != nil {
+		return m.Pool
+	}
+	return nil
+}
+
+// QueryAccountRedelegationRequest ...
+type QueryAccountRedelegationRequest struct {
+	// address ...
+	Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+}
+
+func (m *QueryAccountRedelegationRequest) Reset() { *m = QueryAccountRedelegationRequest{} }
+func (m *QueryAccountRedelegationRequest) String() string { return proto.CompactTextString(m) }
+func (*QueryAccountRedelegationRequest) ProtoMessage() {}
+func (*QueryAccountRedelegationRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_51ca316755261aec, []int{8}
+}
+func (m *QueryAccountRedelegationRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *QueryAccountRedelegationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_QueryAccountRedelegationRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *QueryAccountRedelegationRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QueryAccountRedelegationRequest.Merge(m, src)
+}
+func (m *QueryAccountRedelegationRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *QueryAccountRedelegationRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_QueryAccountRedelegationRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QueryAccountRedelegationRequest proto.InternalMessageInfo
+
+func (m *QueryAccountRedelegationRequest) GetAddress() string {
+	if m != nil {
+		return m.Address
+	}
+	return ""
+}
+
+// QueryAccountRedelegationResponse is the response type for the Query/AccountRedelegation RPC method.
+type QueryAccountRedelegationResponse struct {
+	// redelegation_cooldown_entries ...
+	RedelegationCooldownEntries []RedelegationEntry `protobuf:"bytes,1,rep,name=redelegation_cooldown_entries,json=redelegationCooldownEntries,proto3" json:"redelegation_cooldown_entries"`
+	// availableSlots ...
+ AvailableSlots uint64 `protobuf:"varint,2,opt,name=available_slots,json=availableSlots,proto3" json:"available_slots,omitempty"` +} + +func (m *QueryAccountRedelegationResponse) Reset() { *m = QueryAccountRedelegationResponse{} } +func (m *QueryAccountRedelegationResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAccountRedelegationResponse) ProtoMessage() {} +func (*QueryAccountRedelegationResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{9} +} +func (m *QueryAccountRedelegationResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountRedelegationResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountRedelegationResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountRedelegationResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountRedelegationResponse.Merge(m, src) +} +func (m *QueryAccountRedelegationResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountRedelegationResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountRedelegationResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountRedelegationResponse proto.InternalMessageInfo + +func (m *QueryAccountRedelegationResponse) GetRedelegationCooldownEntries() []RedelegationEntry { + if m != nil { + return m.RedelegationCooldownEntries + } + return nil +} + +func (m *QueryAccountRedelegationResponse) GetAvailableSlots() uint64 { + if m != nil { + return m.AvailableSlots + } + return 0 +} + +// RedelegationEntry ... +type RedelegationEntry struct { + // creation_date ... + CreationDate uint64 `protobuf:"varint,1,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` + // finish_date ... 
+ FinishDate uint64 `protobuf:"varint,2,opt,name=finish_date,json=finishDate,proto3" json:"finish_date,omitempty"` +} + +func (m *RedelegationEntry) Reset() { *m = RedelegationEntry{} } +func (m *RedelegationEntry) String() string { return proto.CompactTextString(m) } +func (*RedelegationEntry) ProtoMessage() {} +func (*RedelegationEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_51ca316755261aec, []int{10} +} +func (m *RedelegationEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedelegationEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RedelegationEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RedelegationEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedelegationEntry.Merge(m, src) +} +func (m *RedelegationEntry) XXX_Size() int { + return m.Size() +} +func (m *RedelegationEntry) XXX_DiscardUnknown() { + xxx_messageInfo_RedelegationEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_RedelegationEntry proto.InternalMessageInfo + +func (m *RedelegationEntry) GetCreationDate() uint64 { + if m != nil { + return m.CreationDate + } + return 0 +} + +func (m *RedelegationEntry) GetFinishDate() uint64 { + if m != nil { + return m.FinishDate + } + return 0 +} + +func init() { + proto.RegisterType((*QueryAccountAssetsRequest)(nil), "kyve.query.v1beta1.QueryAccountAssetsRequest") + proto.RegisterType((*QueryAccountAssetsResponse)(nil), "kyve.query.v1beta1.QueryAccountAssetsResponse") + proto.RegisterType((*QueryAccountDelegationUnbondingsRequest)(nil), "kyve.query.v1beta1.QueryAccountDelegationUnbondingsRequest") + proto.RegisterType((*QueryAccountDelegationUnbondingsResponse)(nil), "kyve.query.v1beta1.QueryAccountDelegationUnbondingsResponse") + proto.RegisterType((*DelegationUnbonding)(nil), "kyve.query.v1beta1.DelegationUnbonding") + proto.RegisterType((*QueryAccountFundedListRequest)(nil), "kyve.query.v1beta1.QueryAccountFundedListRequest") + proto.RegisterType((*QueryAccountFundedListResponse)(nil), "kyve.query.v1beta1.QueryAccountFundedListResponse") + proto.RegisterType((*Funded)(nil), "kyve.query.v1beta1.Funded") + proto.RegisterType((*QueryAccountRedelegationRequest)(nil), "kyve.query.v1beta1.QueryAccountRedelegationRequest") + proto.RegisterType((*QueryAccountRedelegationResponse)(nil), "kyve.query.v1beta1.QueryAccountRedelegationResponse") + proto.RegisterType((*RedelegationEntry)(nil), "kyve.query.v1beta1.RedelegationEntry") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/account.proto", fileDescriptor_51ca316755261aec) } + +var fileDescriptor_51ca316755261aec = []byte{ + // 896 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xce, 0x26, 0xc1, 0x15, 0x2f, 0x2d, 0xa5, 0x13, 0x54, 0x99, 0x0d, 0x5e, 0x47, 0x8b, 0xc0, + 0xa1, 0x82, 0x5d, 0xd9, 0x09, 0xa8, 0x25, 0x70, 0xa8, 0x9b, 0x84, 0x43, 0x01, 0x95, 0x35, 0x20, + 0xb5, 0x97, 0xd5, 0x78, 0x77, 0xbc, 0x59, 0x65, 0xbd, 0xe3, 0xee, 0x8c, 0x13, 0x2c, 0xc4, 0x85, + 0x13, 0x02, 0x0e, 0x48, 0xfc, 0x05, 0xfe, 0x02, 0xb7, 0x9e, 0x38, 0xf5, 0x58, 0x89, 0x0b, 0x17, + 0x10, 0x4a, 0xf8, 0x21, 0x68, 0x67, 0x66, 0xed, 0x31, 0x5e, 0xdb, 0xa4, 0xb7, 0xdd, 0x37, 0xef, + 0x7b, 0xf3, 0x7d, 0xf3, 0xbe, 0x37, 0x03, 0xdb, 0x27, 0xa3, 0x53, 0xe2, 0x3e, 0x1e, 0x92, 0x6c, + 0xe4, 0x9e, 0x36, 0xbb, 0x84, 0xe3, 0xa6, 
0x8b, 0x83, 0x80, 0x0e, 0x53, 0xee, 0x0c, 0x32, 0xca, + 0x29, 0x42, 0x79, 0x86, 0x23, 0x32, 0x1c, 0x95, 0x61, 0xde, 0x0a, 0x28, 0xeb, 0x53, 0xe6, 0x76, + 0x31, 0xfb, 0x2f, 0x78, 0x80, 0xa3, 0x38, 0xc5, 0x3c, 0xa6, 0xa9, 0xc4, 0x9b, 0xaf, 0x44, 0x34, + 0xa2, 0xe2, 0xd3, 0xcd, 0xbf, 0x54, 0xf4, 0xb5, 0x88, 0xd2, 0x28, 0x21, 0x2e, 0x1e, 0xc4, 0x2e, + 0x4e, 0x53, 0xca, 0x05, 0x84, 0xa9, 0x55, 0xab, 0x84, 0x95, 0x64, 0x20, 0xd6, 0xed, 0x77, 0xe1, + 0xd5, 0xcf, 0xf2, 0xdf, 0xbb, 0x92, 0xe9, 0x5d, 0xc6, 0x08, 0x67, 0x1e, 0x79, 0x3c, 0x24, 0x8c, + 0xa3, 0x2a, 0x5c, 0xc1, 0x61, 0x98, 0x11, 0xc6, 0xaa, 0xc6, 0xb6, 0xb1, 0xf3, 0xa2, 0x57, 0xfc, + 0xda, 0xdf, 0xad, 0x81, 0x59, 0x86, 0x63, 0x03, 0x9a, 0x32, 0x92, 0x03, 0xbb, 0x38, 0xc1, 0x69, + 0x40, 0x04, 0x70, 0xdd, 0x2b, 0x7e, 0xd1, 0x6d, 0xa8, 0x8a, 0x8d, 0x03, 0x9a, 0xf8, 0x8c, 0x24, + 0x3d, 0x3f, 0x24, 0x09, 0x89, 0x04, 0xe5, 0xea, 0xaa, 0x48, 0xbd, 0x59, 0xac, 0x77, 0x48, 0xd2, + 0x3b, 0x18, 0xaf, 0xa2, 0xfb, 0x60, 0xcf, 0x43, 0xfa, 0xc3, 0xb4, 0x4b, 0xd3, 0x30, 0x4e, 0xa3, + 0xea, 0x9a, 0xa8, 0x51, 0x2f, 0xaf, 0xf1, 0x45, 0x91, 0x86, 0x5c, 0xd8, 0x1c, 0x17, 0xd3, 0x18, + 0xac, 0x0b, 0x34, 0x2a, 0x96, 0xb4, 0xdd, 0xdb, 0x50, 0x2b, 0x01, 0x68, 0x1b, 0xbf, 0x20, 0xa0, + 0x5b, 0xb3, 0xd0, 0xc9, 0xa6, 0x6f, 0xc1, 0xcb, 0xe3, 0x1a, 0x19, 0x39, 0xc3, 0x59, 0xc8, 0xaa, + 0x15, 0x01, 0xbb, 0x5e, 0xc4, 0x3d, 0x19, 0x9e, 0x4a, 0xed, 0x0d, 0xe5, 0x0e, 0x57, 0xa6, 0x53, + 0x8f, 0x64, 0xd8, 0xfe, 0xc1, 0x80, 0x86, 0xde, 0x8a, 0x92, 0x9d, 0xc7, 0x0d, 0x3d, 0x02, 0x98, + 0xb8, 0x4a, 0xb4, 0x66, 0xa3, 0xf5, 0xa6, 0x23, 0x2d, 0xe8, 0xe4, 0x16, 0x9c, 0x76, 0xa7, 0xf3, + 0x00, 0x47, 0x44, 0x61, 0x3d, 0x0d, 0xa9, 0x1b, 0x63, 0x75, 0xda, 0x18, 0xbf, 0x19, 0xb0, 0xb3, + 0x9c, 0x8d, 0xb2, 0xc9, 0x27, 0x00, 0xe3, 0x03, 0xcc, 0x2d, 0xb6, 0xb6, 0xb3, 0xd1, 0x6a, 0x38, + 0xb3, 0x53, 0xe2, 0x94, 0x54, 0x69, 0xaf, 0x3f, 0xfd, 0xab, 0xbe, 0xe2, 0x69, 0x05, 0xd0, 0x47, + 0x53, 0xea, 0x56, 0x85, 0xba, 0xc6, 0x52, 0x75, 0x92, 0x8b, 0x2e, 0xcf, 0xfe, 0xde, 0x80, 0xcd, + 0xb2, 0x06, 0xde, 0x84, 0x0a, 0xee, 0xe7, 0xaa, 0x94, 0xab, 0xd5, 0x1f, 0x7a, 0x1d, 0xae, 0x05, + 0x19, 0x91, 0x8e, 0xe0, 0x71, 0x9f, 0x28, 0x27, 0x5f, 0x2d, 0x82, 0x9f, 0xc7, 0x7d, 0x82, 0xde, + 0x83, 0x0a, 0xe3, 0xf8, 0x84, 0x64, 0xc2, 0xa3, 0x1b, 0x2d, 0xab, 0x4c, 0xe8, 0xd1, 0x30, 0x49, + 0x3a, 0x22, 0xcb, 0x53, 0xd9, 0xf6, 0x1d, 0xa8, 0xe9, 0x07, 0x9a, 0xb7, 0x9d, 0x84, 0x1f, 0xc7, + 0x8c, 0x2f, 0x9f, 0xd2, 0x47, 0x60, 0xcd, 0x83, 0xaa, 0x0e, 0xdc, 0x86, 0x4a, 0x4f, 0x44, 0xd5, + 0xe9, 0x9b, 0xe5, 0xa4, 0xf2, 0x0c, 0x75, 0xe0, 0x2a, 0xdf, 0xee, 0x40, 0x45, 0xc6, 0xe7, 0x9e, + 0x4a, 0x13, 0xd6, 0x07, 0x94, 0x26, 0xaa, 0x11, 0xb5, 0xb2, 0xca, 0x6d, 0xcc, 0xe2, 0xe0, 0x01, + 0xa5, 0x89, 0x27, 0x52, 0xed, 0x7d, 0xa8, 0xeb, 0x84, 0x3d, 0x32, 0x19, 0xb5, 0xe5, 0x6a, 0x9f, + 0x18, 0xb0, 0x3d, 0x1f, 0xad, 0x04, 0x53, 0xa8, 0x65, 0x5a, 0xdc, 0x0f, 0x28, 0x4d, 0x42, 0x7a, + 0x96, 0xfa, 0x24, 0xe5, 0x59, 0x4c, 0x0a, 0x17, 0xbe, 0x51, 0xc6, 0x56, 0x2f, 0x78, 0x98, 0xf2, + 0x6c, 0xa4, 0x8e, 0x64, 0x4b, 0xaf, 0x78, 0x4f, 0x15, 0x3c, 0x94, 0xf5, 0x50, 0x03, 0xae, 0xe3, + 0x53, 0x1c, 0x27, 0xb8, 0x9b, 0x10, 0x9f, 0x25, 0x94, 0x33, 0xe5, 0x8e, 0x97, 0xc6, 0xe1, 0x4e, + 0x1e, 0xb5, 0x1f, 0xc2, 0x8d, 0x99, 0x0d, 0xa6, 0x9c, 0x15, 0x62, 0x5e, 0x5c, 0xa7, 0x63, 0x67, + 0x1d, 0x60, 0x4e, 0x50, 0x1d, 0x36, 0x7a, 0x71, 0x1a, 0xb3, 0x63, 0x99, 0x22, 0xcb, 0x83, 0x0c, + 0xe5, 0x09, 0xad, 0x1f, 0x2b, 0x70, 0x55, 0x3f, 0x19, 0xf4, 0x8b, 0x01, 0xd7, 0xa6, 0x6e, 0x6e, + 0xf4, 0x4e, 0x99, 0xe0, 0xb9, 0x2f, 0x83, 0xe9, 0xfc, 0xdf, 0x74, 
0x79, 0xec, 0xf6, 0xde, 0xb7, + 0xbf, 0xff, 0xf3, 0xf3, 0xaa, 0x83, 0xde, 0x76, 0xe7, 0xbf, 0x92, 0x3e, 0x16, 0x18, 0xf7, 0x6b, + 0xd5, 0xd0, 0x6f, 0xd0, 0x9f, 0x06, 0x6c, 0x2d, 0xb8, 0x47, 0xd0, 0xfe, 0x32, 0x16, 0x0b, 0xee, + 0x42, 0xf3, 0x83, 0xe7, 0x03, 0x2b, 0x41, 0xf7, 0x84, 0xa0, 0x0f, 0xd1, 0xfe, 0x22, 0x41, 0x65, + 0x0f, 0x85, 0xae, 0xef, 0x57, 0x03, 0x6e, 0xcc, 0xcc, 0x26, 0x6a, 0x2e, 0x23, 0x36, 0x73, 0x05, + 0x98, 0xad, 0xcb, 0x40, 0x94, 0x82, 0x3b, 0x42, 0xc1, 0x2e, 0x6a, 0x2e, 0x52, 0x20, 0x87, 0xdd, + 0x4f, 0x62, 0xc6, 0x35, 0xde, 0x4f, 0x0c, 0xd8, 0x2c, 0x19, 0x32, 0xb4, 0xbb, 0x8c, 0x46, 0xc9, + 0x40, 0x9b, 0x7b, 0x97, 0x03, 0x29, 0xf6, 0xef, 0x0b, 0xf6, 0x7b, 0xa8, 0xb5, 0x88, 0xbd, 0x3e, + 0x97, 0x13, 0xfa, 0xed, 0x83, 0xa7, 0xe7, 0x96, 0xf1, 0xec, 0xdc, 0x32, 0xfe, 0x3e, 0xb7, 0x8c, + 0x9f, 0x2e, 0xac, 0x95, 0x67, 0x17, 0xd6, 0xca, 0x1f, 0x17, 0xd6, 0xca, 0xa3, 0x5b, 0x51, 0xcc, + 0x8f, 0x87, 0x5d, 0x27, 0xa0, 0x7d, 0xf7, 0xfe, 0xc3, 0x2f, 0x0f, 0x3f, 0x25, 0xfc, 0x8c, 0x66, + 0x27, 0x6e, 0x70, 0x8c, 0xe3, 0xd4, 0xfd, 0x4a, 0x6d, 0xc3, 0x47, 0x03, 0xc2, 0xba, 0x15, 0xf1, + 0x10, 0xef, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x5b, 0x16, 0x72, 0xf8, 0x09, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryAccountClient is the client API for QueryAccount service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryAccountClient interface { + // AccountAssets returns an overview of the sum of all balances for a given user. e.g. balance, staking, funding, etc. + AccountAssets(ctx context.Context, in *QueryAccountAssetsRequest, opts ...grpc.CallOption) (*QueryAccountAssetsResponse, error) + // AccountDelegationUnbondings ... + AccountDelegationUnbondings(ctx context.Context, in *QueryAccountDelegationUnbondingsRequest, opts ...grpc.CallOption) (*QueryAccountDelegationUnbondingsResponse, error) + // AccountFundedList returns all pools the given user has funded into. + AccountFundedList(ctx context.Context, in *QueryAccountFundedListRequest, opts ...grpc.CallOption) (*QueryAccountFundedListResponse, error) + // AccountRedelegation ... + AccountRedelegation(ctx context.Context, in *QueryAccountRedelegationRequest, opts ...grpc.CallOption) (*QueryAccountRedelegationResponse, error) +} + +type queryAccountClient struct { + cc grpc1.ClientConn +} + +func NewQueryAccountClient(cc grpc1.ClientConn) QueryAccountClient { + return &queryAccountClient{cc} +} + +func (c *queryAccountClient) AccountAssets(ctx context.Context, in *QueryAccountAssetsRequest, opts ...grpc.CallOption) (*QueryAccountAssetsResponse, error) { + out := new(QueryAccountAssetsResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryAccount/AccountAssets", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryAccountClient) AccountDelegationUnbondings(ctx context.Context, in *QueryAccountDelegationUnbondingsRequest, opts ...grpc.CallOption) (*QueryAccountDelegationUnbondingsResponse, error) { + out := new(QueryAccountDelegationUnbondingsResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryAccount/AccountDelegationUnbondings", in, out, opts...) 
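+	// As an illustration, a caller holding the same connection could build this
+	// client with NewQueryAccountClient and page through all unbondings by setting
+	// Pagination (a query.PageRequest) on the request and feeding the returned
+	// PageResponse.NextKey into the Key of the follow-up request.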
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryAccountClient) AccountFundedList(ctx context.Context, in *QueryAccountFundedListRequest, opts ...grpc.CallOption) (*QueryAccountFundedListResponse, error) { + out := new(QueryAccountFundedListResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryAccount/AccountFundedList", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryAccountClient) AccountRedelegation(ctx context.Context, in *QueryAccountRedelegationRequest, opts ...grpc.CallOption) (*QueryAccountRedelegationResponse, error) { + out := new(QueryAccountRedelegationResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryAccount/AccountRedelegation", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryAccountServer is the server API for QueryAccount service. +type QueryAccountServer interface { + // AccountAssets returns an overview of the sum of all balances for a given user. e.g. balance, staking, funding, etc. + AccountAssets(context.Context, *QueryAccountAssetsRequest) (*QueryAccountAssetsResponse, error) + // AccountDelegationUnbondings ... + AccountDelegationUnbondings(context.Context, *QueryAccountDelegationUnbondingsRequest) (*QueryAccountDelegationUnbondingsResponse, error) + // AccountFundedList returns all pools the given user has funded into. + AccountFundedList(context.Context, *QueryAccountFundedListRequest) (*QueryAccountFundedListResponse, error) + // AccountRedelegation ... + AccountRedelegation(context.Context, *QueryAccountRedelegationRequest) (*QueryAccountRedelegationResponse, error) +} + +// UnimplementedQueryAccountServer can be embedded to have forward compatible implementations. +type UnimplementedQueryAccountServer struct { +} + +func (*UnimplementedQueryAccountServer) AccountAssets(ctx context.Context, req *QueryAccountAssetsRequest) (*QueryAccountAssetsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AccountAssets not implemented") +} +func (*UnimplementedQueryAccountServer) AccountDelegationUnbondings(ctx context.Context, req *QueryAccountDelegationUnbondingsRequest) (*QueryAccountDelegationUnbondingsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AccountDelegationUnbondings not implemented") +} +func (*UnimplementedQueryAccountServer) AccountFundedList(ctx context.Context, req *QueryAccountFundedListRequest) (*QueryAccountFundedListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AccountFundedList not implemented") +} +func (*UnimplementedQueryAccountServer) AccountRedelegation(ctx context.Context, req *QueryAccountRedelegationRequest) (*QueryAccountRedelegationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AccountRedelegation not implemented") +} + +func RegisterQueryAccountServer(s grpc1.Server, srv QueryAccountServer) { + s.RegisterService(&_QueryAccount_serviceDesc, srv) +} + +func _QueryAccount_AccountAssets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAccountAssetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryAccountServer).AccountAssets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryAccount/AccountAssets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
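+		// When the server was started with a unary interceptor, the typed call below
+		// is wrapped in this closure so middleware (logging, auth, metrics) can run
+		// around it; info.FullMethod above identifies the route being served.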
return srv.(QueryAccountServer).AccountAssets(ctx, req.(*QueryAccountAssetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryAccount_AccountDelegationUnbondings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAccountDelegationUnbondingsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryAccountServer).AccountDelegationUnbondings(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryAccount/AccountDelegationUnbondings", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryAccountServer).AccountDelegationUnbondings(ctx, req.(*QueryAccountDelegationUnbondingsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryAccount_AccountFundedList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAccountFundedListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryAccountServer).AccountFundedList(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryAccount/AccountFundedList", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryAccountServer).AccountFundedList(ctx, req.(*QueryAccountFundedListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryAccount_AccountRedelegation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAccountRedelegationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryAccountServer).AccountRedelegation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryAccount/AccountRedelegation", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryAccountServer).AccountRedelegation(ctx, req.(*QueryAccountRedelegationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryAccount_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryAccount", + HandlerType: (*QueryAccountServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AccountAssets", + Handler: _QueryAccount_AccountAssets_Handler, + }, + { + MethodName: "AccountDelegationUnbondings", + Handler: _QueryAccount_AccountDelegationUnbondings_Handler, + }, + { + MethodName: "AccountFundedList", + Handler: _QueryAccount_AccountFundedList_Handler, + }, + { + MethodName: "AccountRedelegation", + Handler: _QueryAccount_AccountRedelegation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/account.proto", +} + +func (m *QueryAccountAssetsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountAssetsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountAssetsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 
0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountAssetsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountAssetsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountAssetsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProtocolFunding != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolFunding)) + i-- + dAtA[i] = 0x38 + } + if m.ProtocolRewards != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolRewards)) + i-- + dAtA[i] = 0x30 + } + if m.ProtocolDelegationUnbonding != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolDelegationUnbonding)) + i-- + dAtA[i] = 0x28 + } + if m.ProtocolDelegation != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolDelegation)) + i-- + dAtA[i] = 0x20 + } + if m.ProtocolSelfDelegationUnbonding != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolSelfDelegationUnbonding)) + i-- + dAtA[i] = 0x18 + } + if m.ProtocolSelfDelegation != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.ProtocolSelfDelegation)) + i-- + dAtA[i] = 0x10 + } + if m.Balance != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.Balance)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountDelegationUnbondingsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountDelegationUnbondingsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountDelegationUnbondingsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountDelegationUnbondingsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountDelegationUnbondingsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountDelegationUnbondingsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Unbondings) > 0 { + for iNdEx := len(m.Unbondings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Unbondings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DelegationUnbonding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DelegationUnbonding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DelegationUnbonding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Staker != nil { + { + size, err := m.Staker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CreationTime != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.CreationTime)) + i-- + dAtA[i] = 0x10 + } + if m.Amount != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountFundedListRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountFundedListRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountFundedListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountFundedListResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountFundedListResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountFundedListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Funded) > 0 { + for iNdEx := len(m.Funded) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Funded[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Funded) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Funded) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Funded) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pool != nil { + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Amount != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountRedelegationRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountRedelegationRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountRedelegationRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountRedelegationResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountRedelegationResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountRedelegationResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AvailableSlots != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.AvailableSlots)) + i-- + dAtA[i] = 0x10 + } + if len(m.RedelegationCooldownEntries) > 0 { + for iNdEx := len(m.RedelegationCooldownEntries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RedelegationCooldownEntries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RedelegationEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedelegationEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedelegationEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FinishDate != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.FinishDate)) + i-- + dAtA[i] = 0x10 + } + if m.CreationDate != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.CreationDate)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintAccount(dAtA []byte, offset int, v uint64) int { + offset -= sovAccount(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAccountAssetsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountAssetsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Balance != 0 { + n += 1 + sovAccount(uint64(m.Balance)) + } + if m.ProtocolSelfDelegation != 0 { + n += 1 + sovAccount(uint64(m.ProtocolSelfDelegation)) + } + if m.ProtocolSelfDelegationUnbonding != 0 { + n += 1 + sovAccount(uint64(m.ProtocolSelfDelegationUnbonding)) + } + if m.ProtocolDelegation != 0 { + n += 1 + sovAccount(uint64(m.ProtocolDelegation)) + } + if m.ProtocolDelegationUnbonding != 0 { + n += 1 + sovAccount(uint64(m.ProtocolDelegationUnbonding)) + } + if m.ProtocolRewards != 0 { + n += 1 + sovAccount(uint64(m.ProtocolRewards)) + } + if m.ProtocolFunding != 0 { + n += 1 + sovAccount(uint64(m.ProtocolFunding)) + } + 
return n +} + +func (m *QueryAccountDelegationUnbondingsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovAccount(uint64(l)) + } + l = len(m.Address) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountDelegationUnbondingsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Unbondings) > 0 { + for _, e := range m.Unbondings { + l = e.Size() + n += 1 + l + sovAccount(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *DelegationUnbonding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Amount != 0 { + n += 1 + sovAccount(uint64(m.Amount)) + } + if m.CreationTime != 0 { + n += 1 + sovAccount(uint64(m.CreationTime)) + } + if m.Staker != nil { + l = m.Staker.Size() + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountFundedListRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountFundedListResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Funded) > 0 { + for _, e := range m.Funded { + l = e.Size() + n += 1 + l + sovAccount(uint64(l)) + } + } + return n +} + +func (m *Funded) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Amount != 0 { + n += 1 + sovAccount(uint64(m.Amount)) + } + if m.Pool != nil { + l = m.Pool.Size() + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountRedelegationRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + return n +} + +func (m *QueryAccountRedelegationResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RedelegationCooldownEntries) > 0 { + for _, e := range m.RedelegationCooldownEntries { + l = e.Size() + n += 1 + l + sovAccount(uint64(l)) + } + } + if m.AvailableSlots != 0 { + n += 1 + sovAccount(uint64(m.AvailableSlots)) + } + return n +} + +func (m *RedelegationEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CreationDate != 0 { + n += 1 + sovAccount(uint64(m.CreationDate)) + } + if m.FinishDate != 0 { + n += 1 + sovAccount(uint64(m.FinishDate)) + } + return n +} + +func sovAccount(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAccount(x uint64) (n int) { + return sovAccount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAccountAssetsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountAssetsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountAssetsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountAssetsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountAssetsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountAssetsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) + } + m.Balance = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Balance |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolSelfDelegation", wireType) + } + m.ProtocolSelfDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolSelfDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolSelfDelegationUnbonding", wireType) + } + m.ProtocolSelfDelegationUnbonding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolSelfDelegationUnbonding |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolDelegation", wireType) + } + m.ProtocolDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolDelegationUnbonding", wireType) + } + m.ProtocolDelegationUnbonding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolDelegationUnbonding |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolRewards", wireType) + } + m.ProtocolRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtocolFunding", wireType) + } + m.ProtocolFunding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ProtocolFunding |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountDelegationUnbondingsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountDelegationUnbondingsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountDelegationUnbondingsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountDelegationUnbondingsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountDelegationUnbondingsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountDelegationUnbondingsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unbondings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unbondings = append(m.Unbondings, DelegationUnbonding{}) + if err := m.Unbondings[len(m.Unbondings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DelegationUnbonding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DelegationUnbonding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DelegationUnbonding: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTime", wireType) + } + m.CreationTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Staker == nil { + m.Staker = &FullStaker{} + } + if err := m.Staker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountFundedListRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountFundedListRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountFundedListRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountFundedListResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountFundedListResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountFundedListResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Funded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Funded = append(m.Funded, Funded{}) + if err := m.Funded[len(m.Funded)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Funded) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Funded: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Funded: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pool == nil { + m.Pool = &BasicPool{} + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountRedelegationRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountRedelegationRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountRedelegationRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountRedelegationResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountRedelegationResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountRedelegationResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedelegationCooldownEntries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedelegationCooldownEntries = append(m.RedelegationCooldownEntries, 
RedelegationEntry{}) + if err := m.RedelegationCooldownEntries[len(m.RedelegationCooldownEntries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableSlots", wireType) + } + m.AvailableSlots = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableSlots |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedelegationEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedelegationEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedelegationEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationDate", wireType) + } + m.CreationDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationDate |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishDate", wireType) + } + m.FinishDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FinishDate |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAccount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAccount + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAccount + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAccount + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAccount = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAccount = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAccount = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/account.pb.gw.go b/x/query/types/account.pb.gw.go new file mode 100644 index 00000000..cb4152fa --- /dev/null +++ b/x/query/types/account.pb.gw.go @@ -0,0 +1,510 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/query/v1beta1/account.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_QueryAccount_AccountAssets_0(ctx context.Context, marshaler runtime.Marshaler, client QueryAccountClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountAssetsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.AccountAssets(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryAccount_AccountAssets_0(ctx context.Context, marshaler runtime.Marshaler, server QueryAccountServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountAssetsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.AccountAssets(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_QueryAccount_AccountDelegationUnbondings_0 = &utilities.DoubleArray{Encoding: 
map[string]int{"address": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_QueryAccount_AccountDelegationUnbondings_0(ctx context.Context, marshaler runtime.Marshaler, client QueryAccountClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountDelegationUnbondingsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryAccount_AccountDelegationUnbondings_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AccountDelegationUnbondings(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryAccount_AccountDelegationUnbondings_0(ctx context.Context, marshaler runtime.Marshaler, server QueryAccountServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountDelegationUnbondingsRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryAccount_AccountDelegationUnbondings_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AccountDelegationUnbondings(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryAccount_AccountFundedList_0(ctx context.Context, marshaler runtime.Marshaler, client QueryAccountClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountFundedListRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.AccountFundedList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryAccount_AccountFundedList_0(ctx context.Context, marshaler runtime.Marshaler, server QueryAccountServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var 
protoReq QueryAccountFundedListRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.AccountFundedList(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryAccount_AccountRedelegation_0(ctx context.Context, marshaler runtime.Marshaler, client QueryAccountClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountRedelegationRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.AccountRedelegation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryAccount_AccountRedelegation_0(ctx context.Context, marshaler runtime.Marshaler, server QueryAccountServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountRedelegationRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.AccountRedelegation(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryAccountHandlerServer registers the http handlers for service QueryAccount to "mux". +// UnaryRPC :call QueryAccountServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryAccountHandlerFromEndpoint instead. 
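+// A minimal usage sketch (illustrative only, not part of the generated API): the
+// "queryServer" value and the listen address ":1317" below are assumptions made
+// for the example.
+//
+//	mux := runtime.NewServeMux()
+//	if err := RegisterQueryAccountHandlerServer(context.Background(), mux, queryServer); err != nil {
+//		panic(err)
+//	}
+//	_ = http.ListenAndServe(":1317", mux)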
+func RegisterQueryAccountHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryAccountServer) error { + + mux.Handle("GET", pattern_QueryAccount_AccountAssets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryAccount_AccountAssets_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountAssets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryAccount_AccountDelegationUnbondings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryAccount_AccountDelegationUnbondings_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountDelegationUnbondings_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryAccount_AccountFundedList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryAccount_AccountFundedList_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountFundedList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryAccount_AccountRedelegation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryAccount_AccountRedelegation_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountRedelegation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryAccountHandlerFromEndpoint is same as RegisterQueryAccountHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryAccountHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryAccountHandler(ctx, mux, conn) +} + +// RegisterQueryAccountHandler registers the http handlers for service QueryAccount to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryAccountHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryAccountHandlerClient(ctx, mux, NewQueryAccountClient(conn)) +} + +// RegisterQueryAccountHandlerClient registers the http handlers for service QueryAccount +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryAccountClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryAccountClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryAccountClient" to call the correct interceptors. 
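+//
+// A minimal usage sketch (illustrative only): the endpoint "localhost:9090" and the
+// grpc.WithInsecure dial option are assumptions made for the example.
+//
+//	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
+//	if err != nil {
+//		panic(err)
+//	}
+//	mux := runtime.NewServeMux()
+//	_ = RegisterQueryAccountHandlerClient(context.Background(), mux, NewQueryAccountClient(conn))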
+func RegisterQueryAccountHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryAccountClient) error { + + mux.Handle("GET", pattern_QueryAccount_AccountAssets_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryAccount_AccountAssets_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountAssets_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryAccount_AccountDelegationUnbondings_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryAccount_AccountDelegationUnbondings_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountDelegationUnbondings_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryAccount_AccountFundedList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryAccount_AccountFundedList_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountFundedList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryAccount_AccountRedelegation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryAccount_AccountRedelegation_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryAccount_AccountRedelegation_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_QueryAccount_AccountAssets_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "account_assets", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryAccount_AccountDelegationUnbondings_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "account_delegation_unbondings", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryAccount_AccountFundedList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "account_funded_list", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryAccount_AccountRedelegation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "account_redelegation", "address"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryAccount_AccountAssets_0 = runtime.ForwardResponseMessage + + forward_QueryAccount_AccountDelegationUnbondings_0 = runtime.ForwardResponseMessage + + forward_QueryAccount_AccountFundedList_0 = runtime.ForwardResponseMessage + + forward_QueryAccount_AccountRedelegation_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/bundles.pb.go b/x/query/types/bundles.pb.go new file mode 100644 index 00000000..94991af3 --- /dev/null +++ b/x/query/types/bundles.pb.go @@ -0,0 +1,3584 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/bundles.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/KYVENetwork/chain/x/bundles/types" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryFinalizedBundlesRequest is the request type for the Query/Staker RPC method. +type QueryFinalizedBundlesRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // pool_id ... 
+ PoolId uint64 `protobuf:"varint,2,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *QueryFinalizedBundlesRequest) Reset() { *m = QueryFinalizedBundlesRequest{} } +func (m *QueryFinalizedBundlesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundlesRequest) ProtoMessage() {} +func (*QueryFinalizedBundlesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{0} +} +func (m *QueryFinalizedBundlesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundlesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundlesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundlesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundlesRequest.Merge(m, src) +} +func (m *QueryFinalizedBundlesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundlesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundlesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundlesRequest proto.InternalMessageInfo + +func (m *QueryFinalizedBundlesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryFinalizedBundlesRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +// QueryStakersByPoolResponse is the response type for the Query/Staker RPC method. +type QueryFinalizedBundlesResponse struct { + // finalized_bundles ... + FinalizedBundles []types.FinalizedBundle `protobuf:"bytes,1,rep,name=finalized_bundles,json=finalizedBundles,proto3" json:"finalized_bundles"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryFinalizedBundlesResponse) Reset() { *m = QueryFinalizedBundlesResponse{} } +func (m *QueryFinalizedBundlesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundlesResponse) ProtoMessage() {} +func (*QueryFinalizedBundlesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{1} +} +func (m *QueryFinalizedBundlesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundlesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundlesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundlesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundlesResponse.Merge(m, src) +} +func (m *QueryFinalizedBundlesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundlesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundlesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundlesResponse proto.InternalMessageInfo + +func (m *QueryFinalizedBundlesResponse) GetFinalizedBundles() []types.FinalizedBundle { + if m != nil { + return m.FinalizedBundles + } + return nil +} + +func (m *QueryFinalizedBundlesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryFinalizedBundleRequest is the request type for the Query/Staker RPC method. +type QueryFinalizedBundleRequest struct { + // pool_id ... + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // id ... + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryFinalizedBundleRequest) Reset() { *m = QueryFinalizedBundleRequest{} } +func (m *QueryFinalizedBundleRequest) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundleRequest) ProtoMessage() {} +func (*QueryFinalizedBundleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{2} +} +func (m *QueryFinalizedBundleRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundleRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundleRequest.Merge(m, src) +} +func (m *QueryFinalizedBundleRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundleRequest proto.InternalMessageInfo + +func (m *QueryFinalizedBundleRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *QueryFinalizedBundleRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryFinalizedBundleResponse is the response type for the Query/Staker RPC method. 
+type QueryFinalizedBundleResponse struct { + // finalized_bundle ... + FinalizedBundle types.FinalizedBundle `protobuf:"bytes,1,opt,name=finalized_bundle,json=finalizedBundle,proto3" json:"finalized_bundle"` +} + +func (m *QueryFinalizedBundleResponse) Reset() { *m = QueryFinalizedBundleResponse{} } +func (m *QueryFinalizedBundleResponse) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundleResponse) ProtoMessage() {} +func (*QueryFinalizedBundleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{3} +} +func (m *QueryFinalizedBundleResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundleResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundleResponse.Merge(m, src) +} +func (m *QueryFinalizedBundleResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundleResponse proto.InternalMessageInfo + +func (m *QueryFinalizedBundleResponse) GetFinalizedBundle() types.FinalizedBundle { + if m != nil { + return m.FinalizedBundle + } + return types.FinalizedBundle{} +} + +// QueryFinalizedBundleRequest is the request type for the Query/Staker RPC method. +type QueryFinalizedBundlesByHeightRequest struct { + // pool_id ... + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // id ... 
+ Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *QueryFinalizedBundlesByHeightRequest) Reset() { *m = QueryFinalizedBundlesByHeightRequest{} } +func (m *QueryFinalizedBundlesByHeightRequest) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundlesByHeightRequest) ProtoMessage() {} +func (*QueryFinalizedBundlesByHeightRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{4} +} +func (m *QueryFinalizedBundlesByHeightRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundlesByHeightRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundlesByHeightRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundlesByHeightRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundlesByHeightRequest.Merge(m, src) +} +func (m *QueryFinalizedBundlesByHeightRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundlesByHeightRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundlesByHeightRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundlesByHeightRequest proto.InternalMessageInfo + +func (m *QueryFinalizedBundlesByHeightRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *QueryFinalizedBundlesByHeightRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +// QueryFinalizedBundleResponse is the response type for the Query/Staker RPC method. +type QueryFinalizedBundlesByHeightResponse struct { + // finalized_bundle ... 
+ FinalizedBundle types.FinalizedBundle `protobuf:"bytes,1,opt,name=finalized_bundle,json=finalizedBundle,proto3" json:"finalized_bundle"` +} + +func (m *QueryFinalizedBundlesByHeightResponse) Reset() { *m = QueryFinalizedBundlesByHeightResponse{} } +func (m *QueryFinalizedBundlesByHeightResponse) String() string { return proto.CompactTextString(m) } +func (*QueryFinalizedBundlesByHeightResponse) ProtoMessage() {} +func (*QueryFinalizedBundlesByHeightResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{5} +} +func (m *QueryFinalizedBundlesByHeightResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryFinalizedBundlesByHeightResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryFinalizedBundlesByHeightResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryFinalizedBundlesByHeightResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryFinalizedBundlesByHeightResponse.Merge(m, src) +} +func (m *QueryFinalizedBundlesByHeightResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryFinalizedBundlesByHeightResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryFinalizedBundlesByHeightResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryFinalizedBundlesByHeightResponse proto.InternalMessageInfo + +func (m *QueryFinalizedBundlesByHeightResponse) GetFinalizedBundle() types.FinalizedBundle { + if m != nil { + return m.FinalizedBundle + } + return types.FinalizedBundle{} +} + +// QueryCurrentVoteStatusRequest is the request type for the Query/Staker RPC method. +type QueryCurrentVoteStatusRequest struct { + // pool_id ... + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *QueryCurrentVoteStatusRequest) Reset() { *m = QueryCurrentVoteStatusRequest{} } +func (m *QueryCurrentVoteStatusRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCurrentVoteStatusRequest) ProtoMessage() {} +func (*QueryCurrentVoteStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{6} +} +func (m *QueryCurrentVoteStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCurrentVoteStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCurrentVoteStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCurrentVoteStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCurrentVoteStatusRequest.Merge(m, src) +} +func (m *QueryCurrentVoteStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCurrentVoteStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCurrentVoteStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCurrentVoteStatusRequest proto.InternalMessageInfo + +func (m *QueryCurrentVoteStatusRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +// QueryCurrentVoteStatusResponse is the response type for the Query/Staker RPC method. +type QueryCurrentVoteStatusResponse struct { + // valid ... + Valid uint64 `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + // invalid ... 
+ Invalid uint64 `protobuf:"varint,2,opt,name=invalid,proto3" json:"invalid,omitempty"` + // abstain ... + Abstain uint64 `protobuf:"varint,3,opt,name=abstain,proto3" json:"abstain,omitempty"` + // total ... + Total uint64 `protobuf:"varint,4,opt,name=total,proto3" json:"total,omitempty"` +} + +func (m *QueryCurrentVoteStatusResponse) Reset() { *m = QueryCurrentVoteStatusResponse{} } +func (m *QueryCurrentVoteStatusResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCurrentVoteStatusResponse) ProtoMessage() {} +func (*QueryCurrentVoteStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{7} +} +func (m *QueryCurrentVoteStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCurrentVoteStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCurrentVoteStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCurrentVoteStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCurrentVoteStatusResponse.Merge(m, src) +} +func (m *QueryCurrentVoteStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCurrentVoteStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCurrentVoteStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCurrentVoteStatusResponse proto.InternalMessageInfo + +func (m *QueryCurrentVoteStatusResponse) GetValid() uint64 { + if m != nil { + return m.Valid + } + return 0 +} + +func (m *QueryCurrentVoteStatusResponse) GetInvalid() uint64 { + if m != nil { + return m.Invalid + } + return 0 +} + +func (m *QueryCurrentVoteStatusResponse) GetAbstain() uint64 { + if m != nil { + return m.Abstain + } + return 0 +} + +func (m *QueryCurrentVoteStatusResponse) GetTotal() uint64 { + if m != nil { + return m.Total + } + return 0 +} + +// QueryCanProposeRequest is the request type for the Query/CanPropose RPC method. +type QueryCanValidateRequest struct { + // pool_id defines the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // valaddress ... 
+ Valaddress string `protobuf:"bytes,2,opt,name=valaddress,proto3" json:"valaddress,omitempty"` +} + +func (m *QueryCanValidateRequest) Reset() { *m = QueryCanValidateRequest{} } +func (m *QueryCanValidateRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCanValidateRequest) ProtoMessage() {} +func (*QueryCanValidateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{8} +} +func (m *QueryCanValidateRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanValidateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanValidateRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanValidateRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanValidateRequest.Merge(m, src) +} +func (m *QueryCanValidateRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCanValidateRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanValidateRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanValidateRequest proto.InternalMessageInfo + +func (m *QueryCanValidateRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *QueryCanValidateRequest) GetValaddress() string { + if m != nil { + return m.Valaddress + } + return "" +} + +// QueryCanProposeResponse is the response type for the Query/CanPropose RPC method. +type QueryCanValidateResponse struct { + // possible ... + Possible bool `protobuf:"varint,1,opt,name=possible,proto3" json:"possible,omitempty"` + // reason ... + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (m *QueryCanValidateResponse) Reset() { *m = QueryCanValidateResponse{} } +func (m *QueryCanValidateResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCanValidateResponse) ProtoMessage() {} +func (*QueryCanValidateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{9} +} +func (m *QueryCanValidateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanValidateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanValidateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanValidateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanValidateResponse.Merge(m, src) +} +func (m *QueryCanValidateResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCanValidateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanValidateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanValidateResponse proto.InternalMessageInfo + +func (m *QueryCanValidateResponse) GetPossible() bool { + if m != nil { + return m.Possible + } + return false +} + +func (m *QueryCanValidateResponse) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +// QueryCanProposeRequest is the request type for the Query/CanPropose RPC method. +type QueryCanProposeRequest struct { + // pool_id defines the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker ... 
+ Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // proposer ... + Proposer string `protobuf:"bytes,3,opt,name=proposer,proto3" json:"proposer,omitempty"` + // from_index ... + FromIndex uint64 `protobuf:"varint,4,opt,name=from_index,json=fromIndex,proto3" json:"from_index,omitempty"` +} + +func (m *QueryCanProposeRequest) Reset() { *m = QueryCanProposeRequest{} } +func (m *QueryCanProposeRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCanProposeRequest) ProtoMessage() {} +func (*QueryCanProposeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{10} +} +func (m *QueryCanProposeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanProposeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanProposeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanProposeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanProposeRequest.Merge(m, src) +} +func (m *QueryCanProposeRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCanProposeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanProposeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanProposeRequest proto.InternalMessageInfo + +func (m *QueryCanProposeRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *QueryCanProposeRequest) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *QueryCanProposeRequest) GetProposer() string { + if m != nil { + return m.Proposer + } + return "" +} + +func (m *QueryCanProposeRequest) GetFromIndex() uint64 { + if m != nil { + return m.FromIndex + } + return 0 +} + +// QueryCanProposeResponse is the response type for the Query/CanPropose RPC method. +type QueryCanProposeResponse struct { + // possible ... + Possible bool `protobuf:"varint,1,opt,name=possible,proto3" json:"possible,omitempty"` + // reason ... 
+ Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (m *QueryCanProposeResponse) Reset() { *m = QueryCanProposeResponse{} } +func (m *QueryCanProposeResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCanProposeResponse) ProtoMessage() {} +func (*QueryCanProposeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{11} +} +func (m *QueryCanProposeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanProposeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanProposeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanProposeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanProposeResponse.Merge(m, src) +} +func (m *QueryCanProposeResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCanProposeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanProposeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanProposeResponse proto.InternalMessageInfo + +func (m *QueryCanProposeResponse) GetPossible() bool { + if m != nil { + return m.Possible + } + return false +} + +func (m *QueryCanProposeResponse) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +// QueryCanVoteRequest is the request type for the Query/CanVote RPC method. +type QueryCanVoteRequest struct { + // pool_id defines the unique ID of the pool. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker ... + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // voter ... + Voter string `protobuf:"bytes,3,opt,name=voter,proto3" json:"voter,omitempty"` + // storage_id ... 
+ StorageId string `protobuf:"bytes,4,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` +} + +func (m *QueryCanVoteRequest) Reset() { *m = QueryCanVoteRequest{} } +func (m *QueryCanVoteRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCanVoteRequest) ProtoMessage() {} +func (*QueryCanVoteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{12} +} +func (m *QueryCanVoteRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanVoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanVoteRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanVoteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanVoteRequest.Merge(m, src) +} +func (m *QueryCanVoteRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCanVoteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanVoteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanVoteRequest proto.InternalMessageInfo + +func (m *QueryCanVoteRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *QueryCanVoteRequest) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *QueryCanVoteRequest) GetVoter() string { + if m != nil { + return m.Voter + } + return "" +} + +func (m *QueryCanVoteRequest) GetStorageId() string { + if m != nil { + return m.StorageId + } + return "" +} + +// QueryCanVoteResponse is the response type for the Query/CanVote RPC method. +type QueryCanVoteResponse struct { + // possible ... + Possible bool `protobuf:"varint,1,opt,name=possible,proto3" json:"possible,omitempty"` + // reason ... 
+ Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (m *QueryCanVoteResponse) Reset() { *m = QueryCanVoteResponse{} } +func (m *QueryCanVoteResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCanVoteResponse) ProtoMessage() {} +func (*QueryCanVoteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b49b126c38ac815c, []int{13} +} +func (m *QueryCanVoteResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCanVoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCanVoteResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCanVoteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCanVoteResponse.Merge(m, src) +} +func (m *QueryCanVoteResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCanVoteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCanVoteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCanVoteResponse proto.InternalMessageInfo + +func (m *QueryCanVoteResponse) GetPossible() bool { + if m != nil { + return m.Possible + } + return false +} + +func (m *QueryCanVoteResponse) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func init() { + proto.RegisterType((*QueryFinalizedBundlesRequest)(nil), "kyve.query.v1beta1.QueryFinalizedBundlesRequest") + proto.RegisterType((*QueryFinalizedBundlesResponse)(nil), "kyve.query.v1beta1.QueryFinalizedBundlesResponse") + proto.RegisterType((*QueryFinalizedBundleRequest)(nil), "kyve.query.v1beta1.QueryFinalizedBundleRequest") + proto.RegisterType((*QueryFinalizedBundleResponse)(nil), "kyve.query.v1beta1.QueryFinalizedBundleResponse") + proto.RegisterType((*QueryFinalizedBundlesByHeightRequest)(nil), "kyve.query.v1beta1.QueryFinalizedBundlesByHeightRequest") + proto.RegisterType((*QueryFinalizedBundlesByHeightResponse)(nil), "kyve.query.v1beta1.QueryFinalizedBundlesByHeightResponse") + proto.RegisterType((*QueryCurrentVoteStatusRequest)(nil), "kyve.query.v1beta1.QueryCurrentVoteStatusRequest") + proto.RegisterType((*QueryCurrentVoteStatusResponse)(nil), "kyve.query.v1beta1.QueryCurrentVoteStatusResponse") + proto.RegisterType((*QueryCanValidateRequest)(nil), "kyve.query.v1beta1.QueryCanValidateRequest") + proto.RegisterType((*QueryCanValidateResponse)(nil), "kyve.query.v1beta1.QueryCanValidateResponse") + proto.RegisterType((*QueryCanProposeRequest)(nil), "kyve.query.v1beta1.QueryCanProposeRequest") + proto.RegisterType((*QueryCanProposeResponse)(nil), "kyve.query.v1beta1.QueryCanProposeResponse") + proto.RegisterType((*QueryCanVoteRequest)(nil), "kyve.query.v1beta1.QueryCanVoteRequest") + proto.RegisterType((*QueryCanVoteResponse)(nil), "kyve.query.v1beta1.QueryCanVoteResponse") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/bundles.proto", fileDescriptor_b49b126c38ac815c) } + +var fileDescriptor_b49b126c38ac815c = []byte{ + // 960 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xce, 0xb8, 0x21, 0xa9, 0x5f, 0x10, 0x4d, 0x87, 0xa8, 0xb5, 0x96, 0xd6, 0x44, 0x2b, 0x4a, + 0xa3, 0x14, 0xed, 0xd4, 0x89, 0x50, 0x1b, 0x71, 0x40, 0xa4, 0x25, 0x90, 0x52, 0xaa, 0xb2, 0x48, + 0xe1, 0xc7, 0xc5, 0x1a, 0x67, 0x27, 0x9b, 0x55, 0x9c, 0x1d, 0x77, 0x67, 0x6c, 0x6a, 0x2c, 0xab, + 0x08, 0xf1, 
0x07, 0x20, 0xf1, 0x07, 0x70, 0x86, 0x13, 0x1c, 0x38, 0x70, 0xe5, 0xd4, 0x63, 0x24, + 0x2e, 0x88, 0x03, 0x42, 0x09, 0x7f, 0x08, 0x9a, 0x1f, 0x6b, 0xaf, 0x7f, 0xc5, 0x9b, 0x20, 0x6e, + 0x7e, 0x6f, 0xe6, 0xbd, 0xf7, 0x7d, 0xdf, 0xcc, 0x7e, 0x63, 0x58, 0x3e, 0x68, 0xb7, 0x18, 0x79, + 0xd2, 0x64, 0x49, 0x9b, 0xb4, 0x2a, 0x35, 0x26, 0x69, 0x85, 0xd4, 0x9a, 0x71, 0x50, 0x67, 0xc2, + 0x6b, 0x24, 0x5c, 0x72, 0x8c, 0xd5, 0x0e, 0x4f, 0xef, 0xf0, 0xec, 0x0e, 0x67, 0x75, 0x97, 0x8b, + 0x43, 0x2e, 0x48, 0x8d, 0x8a, 0xe1, 0xe2, 0x06, 0x0d, 0xa3, 0x98, 0xca, 0x88, 0xc7, 0xa6, 0xde, + 0x59, 0x0a, 0x79, 0xc8, 0xf5, 0x4f, 0xa2, 0x7e, 0xd9, 0xec, 0xb5, 0x90, 0xf3, 0xb0, 0xce, 0x08, + 0x6d, 0x44, 0x84, 0xc6, 0x31, 0x97, 0xba, 0xc4, 0xce, 0x74, 0x5c, 0x8d, 0xca, 0xe2, 0x18, 0x8f, + 0xcb, 0x7d, 0x06, 0xd7, 0x3e, 0x52, 0x93, 0xb7, 0xa2, 0x98, 0xd6, 0xa3, 0x2f, 0x59, 0xb0, 0x69, + 0x96, 0x7d, 0xf6, 0xa4, 0xc9, 0x84, 0xc4, 0x5b, 0x00, 0x7d, 0x2c, 0x25, 0xb4, 0x8c, 0x56, 0x16, + 0xd6, 0x5e, 0xf7, 0x0c, 0x70, 0x4f, 0x01, 0x1f, 0xe4, 0xe4, 0x3d, 0xa6, 0x21, 0xb3, 0xb5, 0x7e, + 0xa6, 0x12, 0x5f, 0x85, 0xf9, 0x06, 0xe7, 0xf5, 0x6a, 0x14, 0x94, 0x0a, 0xcb, 0x68, 0x65, 0xd6, + 0x9f, 0x53, 0xe1, 0x76, 0xe0, 0xfe, 0x86, 0xe0, 0xfa, 0x04, 0x04, 0xa2, 0xc1, 0x63, 0xc1, 0xf0, + 0xa7, 0x70, 0x79, 0x2f, 0x5d, 0xab, 0x5a, 0xf4, 0x25, 0xb4, 0x7c, 0x61, 0x65, 0x61, 0xed, 0x86, + 0xa7, 0x65, 0x4d, 0x29, 0xa5, 0x20, 0x86, 0x5a, 0x6d, 0xce, 0x3e, 0xff, 0xeb, 0xd5, 0x19, 0x7f, + 0x71, 0x6f, 0x68, 0x02, 0x7e, 0x6f, 0x80, 0x5c, 0x41, 0x93, 0xbb, 0x39, 0x95, 0x9c, 0x81, 0x95, + 0x65, 0xe7, 0x6e, 0xc1, 0x2b, 0xe3, 0x38, 0xa4, 0x22, 0x66, 0xc8, 0xa3, 0x2c, 0x79, 0xfc, 0x12, + 0x14, 0x7a, 0x82, 0x14, 0xa2, 0xc0, 0x6d, 0x8d, 0x3f, 0x8d, 0x9e, 0x14, 0x3b, 0xb0, 0x38, 0x2c, + 0x85, 0x3d, 0x93, 0x33, 0x29, 0x71, 0x69, 0x48, 0x09, 0xf7, 0x13, 0x78, 0x6d, 0xec, 0x19, 0x6c, + 0xb6, 0xdf, 0x67, 0x51, 0xb8, 0x2f, 0xa7, 0x12, 0xb9, 0x02, 0x73, 0xfb, 0x7a, 0x67, 0x7a, 0xba, + 0x26, 0x72, 0x9f, 0xc1, 0x8d, 0x29, 0x8d, 0xff, 0x67, 0x66, 0x77, 0xed, 0xed, 0xba, 0xd7, 0x4c, + 0x12, 0x16, 0xcb, 0x1d, 0x2e, 0xd9, 0xc7, 0x92, 0xca, 0xa6, 0x98, 0x46, 0xc9, 0xfd, 0x0a, 0x41, + 0x79, 0x52, 0xa9, 0x05, 0xbd, 0x04, 0x2f, 0xb4, 0x68, 0xbd, 0x57, 0x69, 0x02, 0x5c, 0x82, 0xf9, + 0x28, 0x36, 0x79, 0x23, 0x46, 0x1a, 0xaa, 0x15, 0x5a, 0x13, 0x92, 0x46, 0x71, 0xe9, 0x82, 0x59, + 0xb1, 0xa1, 0xea, 0x24, 0xb9, 0xa4, 0xf5, 0xd2, 0xac, 0xe9, 0xa4, 0x03, 0xd7, 0x87, 0xab, 0x06, + 0x01, 0x8d, 0x77, 0x54, 0x03, 0x2a, 0xa7, 0x5f, 0xa9, 0x32, 0x40, 0x8b, 0xd6, 0x69, 0x10, 0x24, + 0x4c, 0x08, 0x0d, 0xa0, 0xe8, 0x67, 0x32, 0xee, 0x23, 0x28, 0x8d, 0xf6, 0xb4, 0x7c, 0x1c, 0xb8, + 0xd8, 0xe0, 0x42, 0x44, 0x35, 0x2b, 0xfe, 0x45, 0xbf, 0x17, 0xab, 0x13, 0x4e, 0x18, 0x15, 0xf6, + 0x3b, 0x29, 0xfa, 0x36, 0x72, 0xbf, 0x41, 0x70, 0x25, 0x6d, 0xf8, 0x38, 0xe1, 0x0d, 0x2e, 0x58, + 0x9e, 0xdb, 0x22, 0x24, 0x3d, 0x60, 0x49, 0xda, 0xcb, 0x44, 0x7a, 0xbe, 0x69, 0x91, 0x68, 0x81, + 0x8a, 0x7e, 0x2f, 0xc6, 0xd7, 0x01, 0xf6, 0x12, 0x7e, 0x58, 0x8d, 0xe2, 0x80, 0x3d, 0xb5, 0x32, + 0x15, 0x55, 0x66, 0x5b, 0x25, 0xdc, 0x0f, 0xfb, 0x52, 0xf5, 0x50, 0xfc, 0x07, 0x56, 0x1d, 0x78, + 0xb9, 0xa7, 0x12, 0x97, 0xe7, 0x67, 0xa4, 0x6e, 0x08, 0x97, 0x3d, 0x3a, 0x26, 0x50, 0x5c, 0x84, + 0xe4, 0x09, 0x0d, 0x99, 0xea, 0x34, 0xab, 0x97, 0x8a, 0x36, 0xb3, 0x1d, 0xb8, 0x0f, 0x60, 0x69, + 0x70, 0xf8, 0xf9, 0x89, 0xac, 0x7d, 0x0f, 0xf0, 0xa2, 0x6e, 0x96, 0x7a, 0xde, 0x4f, 0x08, 0x16, + 0x87, 0xbf, 0x46, 0x7c, 0xdb, 0x1b, 0x7d, 0x9e, 0xbc, 0xd3, 0xde, 0x05, 0xa7, 0x72, 0x86, 0x0a, + 0x03, 0xdf, 0xbd, 0xf3, 0xf5, 0xef, 
0xff, 0x7c, 0x57, 0xa8, 0x60, 0x42, 0xc6, 0xbc, 0x96, 0x23, + 0x0e, 0x4f, 0x3a, 0x56, 0xe9, 0x2e, 0xfe, 0x19, 0xc1, 0xa5, 0xa1, 0xae, 0x98, 0xe4, 0x9d, 0x9f, + 0x02, 0xbe, 0x9d, 0xbf, 0xc0, 0xe2, 0x7d, 0x4b, 0xe3, 0x7d, 0x13, 0xaf, 0xe7, 0xc1, 0xdb, 0x87, + 0x4b, 0x3a, 0x0a, 0xf3, 0x9f, 0x08, 0x4a, 0x93, 0x4c, 0x0f, 0xdf, 0xcd, 0x2d, 0xde, 0x90, 0x01, + 0x3b, 0x1b, 0xe7, 0xa8, 0xb4, 0x74, 0xb6, 0x35, 0x9d, 0x7b, 0xf8, 0x9d, 0x3c, 0x74, 0xaa, 0xb5, + 0x76, 0xd5, 0x58, 0x78, 0x96, 0x98, 0xc9, 0x74, 0xf1, 0x2f, 0x08, 0x2e, 0x8f, 0xb8, 0x22, 0x9e, + 0x7c, 0x25, 0x26, 0x99, 0xaf, 0xb3, 0x76, 0x96, 0x12, 0xcb, 0x63, 0x43, 0xf3, 0x58, 0xc7, 0x95, + 0x71, 0x3c, 0x76, 0x4d, 0x59, 0x55, 0x7d, 0x67, 0x55, 0xa1, 0x0b, 0x33, 0x17, 0xe9, 0x07, 0x04, + 0x0b, 0x19, 0xdf, 0xc3, 0xb7, 0x26, 0x8f, 0x1f, 0x71, 0x5c, 0xe7, 0x8d, 0x7c, 0x9b, 0x2d, 0xca, + 0xb7, 0x35, 0xca, 0x0d, 0x7c, 0x67, 0x2c, 0x4a, 0x1a, 0x57, 0x5b, 0xb6, 0x22, 0xab, 0x6f, 0xdf, + 0xa6, 0xbb, 0xf8, 0x57, 0x04, 0xd0, 0x37, 0x33, 0xbc, 0x7a, 0xda, 0xf4, 0x41, 0xdf, 0x75, 0x6e, + 0xe5, 0xda, 0x6b, 0x81, 0xfa, 0x1a, 0xe8, 0x43, 0xfc, 0x60, 0x12, 0x50, 0xeb, 0xc0, 0x59, 0x9c, + 0xc6, 0xdc, 0xba, 0xa4, 0x93, 0xba, 0x73, 0x97, 0x74, 0xfa, 0xe6, 0xdc, 0xc5, 0x3f, 0x22, 0x98, + 0xb7, 0xe6, 0x85, 0x6f, 0x9e, 0x2a, 0x5b, 0xdf, 0x5b, 0x9d, 0x95, 0xe9, 0x1b, 0x2d, 0xe4, 0x87, + 0x1a, 0xf2, 0x16, 0xbe, 0x3f, 0x51, 0x5b, 0x2e, 0xc7, 0xe3, 0xd5, 0xf6, 0xab, 0x13, 0xa9, 0xfb, + 0x76, 0x37, 0xef, 0x3f, 0x3f, 0x2e, 0xa3, 0xa3, 0xe3, 0x32, 0xfa, 0xfb, 0xb8, 0x8c, 0xbe, 0x3d, + 0x29, 0xcf, 0x1c, 0x9d, 0x94, 0x67, 0xfe, 0x38, 0x29, 0xcf, 0x7c, 0xbe, 0x1a, 0x46, 0x72, 0xbf, + 0x59, 0xf3, 0x76, 0xf9, 0x21, 0xf9, 0xe0, 0xb3, 0x9d, 0x77, 0x1f, 0x31, 0xf9, 0x05, 0x4f, 0x0e, + 0xc8, 0xee, 0x3e, 0x8d, 0x62, 0xf2, 0xd4, 0x0e, 0x96, 0xed, 0x06, 0x13, 0xb5, 0x39, 0xfd, 0x77, + 0x7a, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0xbd, 0xf0, 0x82, 0x0a, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryBundlesClient is the client API for QueryBundles service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryBundlesClient interface { + // FinalizedBundles ... + FinalizedBundles(ctx context.Context, in *QueryFinalizedBundlesRequest, opts ...grpc.CallOption) (*QueryFinalizedBundlesResponse, error) + // FinalizedBundle ... + FinalizedBundle(ctx context.Context, in *QueryFinalizedBundleRequest, opts ...grpc.CallOption) (*QueryFinalizedBundleResponse, error) + // Queries the bundle which contains the data given height + FinalizedBundlesByHeight(ctx context.Context, in *QueryFinalizedBundlesByHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedBundlesByHeightResponse, error) + // CurrentVoteStatus ... + CurrentVoteStatus(ctx context.Context, in *QueryCurrentVoteStatusRequest, opts ...grpc.CallOption) (*QueryCurrentVoteStatusResponse, error) + // CanValidate ... + CanValidate(ctx context.Context, in *QueryCanValidateRequest, opts ...grpc.CallOption) (*QueryCanValidateResponse, error) + // CanPropose ... 
+ CanPropose(ctx context.Context, in *QueryCanProposeRequest, opts ...grpc.CallOption) (*QueryCanProposeResponse, error) + // CanVote checks if voter on pool can still vote for the given bundle + CanVote(ctx context.Context, in *QueryCanVoteRequest, opts ...grpc.CallOption) (*QueryCanVoteResponse, error) +} + +type queryBundlesClient struct { + cc grpc1.ClientConn +} + +func NewQueryBundlesClient(cc grpc1.ClientConn) QueryBundlesClient { + return &queryBundlesClient{cc} +} + +func (c *queryBundlesClient) FinalizedBundles(ctx context.Context, in *QueryFinalizedBundlesRequest, opts ...grpc.CallOption) (*QueryFinalizedBundlesResponse, error) { + out := new(QueryFinalizedBundlesResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/FinalizedBundles", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) FinalizedBundle(ctx context.Context, in *QueryFinalizedBundleRequest, opts ...grpc.CallOption) (*QueryFinalizedBundleResponse, error) { + out := new(QueryFinalizedBundleResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/FinalizedBundle", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) FinalizedBundlesByHeight(ctx context.Context, in *QueryFinalizedBundlesByHeightRequest, opts ...grpc.CallOption) (*QueryFinalizedBundlesByHeightResponse, error) { + out := new(QueryFinalizedBundlesByHeightResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/FinalizedBundlesByHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) CurrentVoteStatus(ctx context.Context, in *QueryCurrentVoteStatusRequest, opts ...grpc.CallOption) (*QueryCurrentVoteStatusResponse, error) { + out := new(QueryCurrentVoteStatusResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/CurrentVoteStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) CanValidate(ctx context.Context, in *QueryCanValidateRequest, opts ...grpc.CallOption) (*QueryCanValidateResponse, error) { + out := new(QueryCanValidateResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/CanValidate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) CanPropose(ctx context.Context, in *QueryCanProposeRequest, opts ...grpc.CallOption) (*QueryCanProposeResponse, error) { + out := new(QueryCanProposeResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/CanPropose", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryBundlesClient) CanVote(ctx context.Context, in *QueryCanVoteRequest, opts ...grpc.CallOption) (*QueryCanVoteResponse, error) { + out := new(QueryCanVoteResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryBundles/CanVote", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryBundlesServer is the server API for QueryBundles service. +type QueryBundlesServer interface { + // FinalizedBundles ... + FinalizedBundles(context.Context, *QueryFinalizedBundlesRequest) (*QueryFinalizedBundlesResponse, error) + // FinalizedBundle ... 
+ FinalizedBundle(context.Context, *QueryFinalizedBundleRequest) (*QueryFinalizedBundleResponse, error) + // Queries the bundle which contains the data given height + FinalizedBundlesByHeight(context.Context, *QueryFinalizedBundlesByHeightRequest) (*QueryFinalizedBundlesByHeightResponse, error) + // CurrentVoteStatus ... + CurrentVoteStatus(context.Context, *QueryCurrentVoteStatusRequest) (*QueryCurrentVoteStatusResponse, error) + // CanValidate ... + CanValidate(context.Context, *QueryCanValidateRequest) (*QueryCanValidateResponse, error) + // CanPropose ... + CanPropose(context.Context, *QueryCanProposeRequest) (*QueryCanProposeResponse, error) + // CanVote checks if voter on pool can still vote for the given bundle + CanVote(context.Context, *QueryCanVoteRequest) (*QueryCanVoteResponse, error) +} + +// UnimplementedQueryBundlesServer can be embedded to have forward compatible implementations. +type UnimplementedQueryBundlesServer struct { +} + +func (*UnimplementedQueryBundlesServer) FinalizedBundles(ctx context.Context, req *QueryFinalizedBundlesRequest) (*QueryFinalizedBundlesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedBundles not implemented") +} +func (*UnimplementedQueryBundlesServer) FinalizedBundle(ctx context.Context, req *QueryFinalizedBundleRequest) (*QueryFinalizedBundleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedBundle not implemented") +} +func (*UnimplementedQueryBundlesServer) FinalizedBundlesByHeight(ctx context.Context, req *QueryFinalizedBundlesByHeightRequest) (*QueryFinalizedBundlesByHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FinalizedBundlesByHeight not implemented") +} +func (*UnimplementedQueryBundlesServer) CurrentVoteStatus(ctx context.Context, req *QueryCurrentVoteStatusRequest) (*QueryCurrentVoteStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CurrentVoteStatus not implemented") +} +func (*UnimplementedQueryBundlesServer) CanValidate(ctx context.Context, req *QueryCanValidateRequest) (*QueryCanValidateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CanValidate not implemented") +} +func (*UnimplementedQueryBundlesServer) CanPropose(ctx context.Context, req *QueryCanProposeRequest) (*QueryCanProposeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CanPropose not implemented") +} +func (*UnimplementedQueryBundlesServer) CanVote(ctx context.Context, req *QueryCanVoteRequest) (*QueryCanVoteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CanVote not implemented") +} + +func RegisterQueryBundlesServer(s grpc1.Server, srv QueryBundlesServer) { + s.RegisterService(&_QueryBundles_serviceDesc, srv) +} + +func _QueryBundles_FinalizedBundles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedBundlesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).FinalizedBundles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/FinalizedBundles", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).FinalizedBundles(ctx, req.(*QueryFinalizedBundlesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_QueryBundles_FinalizedBundle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedBundleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).FinalizedBundle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/FinalizedBundle", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).FinalizedBundle(ctx, req.(*QueryFinalizedBundleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryBundles_FinalizedBundlesByHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryFinalizedBundlesByHeightRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).FinalizedBundlesByHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/FinalizedBundlesByHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).FinalizedBundlesByHeight(ctx, req.(*QueryFinalizedBundlesByHeightRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryBundles_CurrentVoteStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCurrentVoteStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).CurrentVoteStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/CurrentVoteStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).CurrentVoteStatus(ctx, req.(*QueryCurrentVoteStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryBundles_CanValidate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCanValidateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).CanValidate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/CanValidate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).CanValidate(ctx, req.(*QueryCanValidateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryBundles_CanPropose_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCanProposeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).CanPropose(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/CanPropose", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).CanPropose(ctx, req.(*QueryCanProposeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_QueryBundles_CanVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCanVoteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryBundlesServer).CanVote(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryBundles/CanVote", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryBundlesServer).CanVote(ctx, req.(*QueryCanVoteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryBundles_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryBundles", + HandlerType: (*QueryBundlesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FinalizedBundles", + Handler: _QueryBundles_FinalizedBundles_Handler, + }, + { + MethodName: "FinalizedBundle", + Handler: _QueryBundles_FinalizedBundle_Handler, + }, + { + MethodName: "FinalizedBundlesByHeight", + Handler: _QueryBundles_FinalizedBundlesByHeight_Handler, + }, + { + MethodName: "CurrentVoteStatus", + Handler: _QueryBundles_CurrentVoteStatus_Handler, + }, + { + MethodName: "CanValidate", + Handler: _QueryBundles_CanValidate_Handler, + }, + { + MethodName: "CanPropose", + Handler: _QueryBundles_CanPropose_Handler, + }, + { + MethodName: "CanVote", + Handler: _QueryBundles_CanVote_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/bundles.proto", +} + +func (m *QueryFinalizedBundlesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundlesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundlesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x10 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBundles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedBundlesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundlesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundlesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBundles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.FinalizedBundles) > 0 { + for iNdEx := len(m.FinalizedBundles) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FinalizedBundles[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBundles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedBundleRequest) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundleRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedBundleResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundleResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.FinalizedBundle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBundles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedBundlesByHeightRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundlesByHeightRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundlesByHeightRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryFinalizedBundlesByHeightResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryFinalizedBundlesByHeightResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryFinalizedBundlesByHeightResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.FinalizedBundle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBundles(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryCurrentVoteStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCurrentVoteStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCurrentVoteStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i 
= encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCurrentVoteStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCurrentVoteStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCurrentVoteStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Total != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Total)) + i-- + dAtA[i] = 0x20 + } + if m.Abstain != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Abstain)) + i-- + dAtA[i] = 0x18 + } + if m.Invalid != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Invalid)) + i-- + dAtA[i] = 0x10 + } + if m.Valid != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.Valid)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanValidateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanValidateRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanValidateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Valaddress) > 0 { + i -= len(m.Valaddress) + copy(dAtA[i:], m.Valaddress) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Valaddress))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanValidateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanValidateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanValidateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + } + if m.Possible { + i-- + if m.Possible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanProposeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanProposeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanProposeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FromIndex != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.FromIndex)) + i-- + dAtA[i] = 0x20 + } + if len(m.Proposer) > 0 { + i -= len(m.Proposer) + copy(dAtA[i:], m.Proposer) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Proposer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = 
encodeVarintBundles(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanProposeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanProposeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanProposeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + } + if m.Possible { + i-- + if m.Possible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanVoteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanVoteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanVoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StorageId) > 0 { + i -= len(m.StorageId) + copy(dAtA[i:], m.StorageId) + i = encodeVarintBundles(dAtA, i, uint64(len(m.StorageId))) + i-- + dAtA[i] = 0x22 + } + if len(m.Voter) > 0 { + i -= len(m.Voter) + copy(dAtA[i:], m.Voter) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Voter))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintBundles(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryCanVoteResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCanVoteResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCanVoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintBundles(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + } + if m.Possible { + i-- + if m.Possible { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintBundles(dAtA []byte, offset int, v uint64) int { + offset -= sovBundles(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryFinalizedBundlesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovBundles(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + return n +} + +func (m *QueryFinalizedBundlesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
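+ // Each repeated FinalizedBundles element costs one byte for the field tag,
+ // a varint holding the element's byte length (sovBundles), and the element
+ // payload itself; the optional Pagination field is counted the same way.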
+ if len(m.FinalizedBundles) > 0 { + for _, e := range m.FinalizedBundles { + l = e.Size() + n += 1 + l + sovBundles(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func (m *QueryFinalizedBundleRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + if m.Id != 0 { + n += 1 + sovBundles(uint64(m.Id)) + } + return n +} + +func (m *QueryFinalizedBundleResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.FinalizedBundle.Size() + n += 1 + l + sovBundles(uint64(l)) + return n +} + +func (m *QueryFinalizedBundlesByHeightRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + if m.Height != 0 { + n += 1 + sovBundles(uint64(m.Height)) + } + return n +} + +func (m *QueryFinalizedBundlesByHeightResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.FinalizedBundle.Size() + n += 1 + l + sovBundles(uint64(l)) + return n +} + +func (m *QueryCurrentVoteStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + return n +} + +func (m *QueryCurrentVoteStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Valid != 0 { + n += 1 + sovBundles(uint64(m.Valid)) + } + if m.Invalid != 0 { + n += 1 + sovBundles(uint64(m.Invalid)) + } + if m.Abstain != 0 { + n += 1 + sovBundles(uint64(m.Abstain)) + } + if m.Total != 0 { + n += 1 + sovBundles(uint64(m.Total)) + } + return n +} + +func (m *QueryCanValidateRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + l = len(m.Valaddress) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func (m *QueryCanValidateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Possible { + n += 2 + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func (m *QueryCanProposeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.Proposer) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + if m.FromIndex != 0 { + n += 1 + sovBundles(uint64(m.FromIndex)) + } + return n +} + +func (m *QueryCanProposeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Possible { + n += 2 + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func (m *QueryCanVoteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovBundles(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.Voter) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + l = len(m.StorageId) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func (m *QueryCanVoteResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Possible { + n += 2 + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovBundles(uint64(l)) + } + return n +} + +func sovBundles(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozBundles(x uint64) (n int) { + return sovBundles(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryFinalizedBundlesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundlesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundlesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedBundlesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundlesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundlesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedBundles", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + 
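+ // The embedded message claims more bytes than remain in the buffer,
+ // so the input must be truncated.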
return io.ErrUnexpectedEOF + } + m.FinalizedBundles = append(m.FinalizedBundles, types.FinalizedBundle{}) + if err := m.FinalizedBundles[len(m.FinalizedBundles)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedBundleRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedBundleResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedBundle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinalizedBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedBundlesByHeightRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundlesByHeightRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundlesByHeightRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryFinalizedBundlesByHeightResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryFinalizedBundlesByHeightResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryFinalizedBundlesByHeightResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizedBundle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinalizedBundle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCurrentVoteStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCurrentVoteStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCurrentVoteStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCurrentVoteStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCurrentVoteStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCurrentVoteStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Valid", wireType) + } + m.Valid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Valid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Invalid", wireType) + } + m.Invalid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Invalid |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Abstain", wireType) + } + m.Abstain = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Abstain |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanValidateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanValidateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanValidateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Valaddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Valaddress 
= string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanValidateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanValidateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanValidateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Possible", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Possible = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanProposeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanProposeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanProposeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proposer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FromIndex", wireType) + } + m.FromIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FromIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanProposeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanProposeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanProposeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Possible", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Possible = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
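+ // Base-128 varint decoding: each byte contributes its low 7 bits to the
+ // string length; a cleared high bit marks the final byte.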
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanVoteRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanVoteRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanVoteRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Voter", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Voter = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCanVoteResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCanVoteResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCanVoteResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Possible", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Possible = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBundles + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBundles + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBundles + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBundles(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBundles + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBundles(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBundles + } + if iNdEx >= l { 
+ return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBundles + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBundles + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBundles + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBundles = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBundles = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBundles = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/bundles.pb.gw.go b/x/query/types/bundles.pb.gw.go new file mode 100644 index 00000000..ab4ed045 --- /dev/null +++ b/x/query/types/bundles.pb.gw.go @@ -0,0 +1,1011 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/query/v1beta1/bundles.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_QueryBundles_FinalizedBundles_0 = &utilities.DoubleArray{Encoding: map[string]int{"pool_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_QueryBundles_FinalizedBundles_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundlesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryBundles_FinalizedBundles_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.FinalizedBundles(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_FinalizedBundles_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundlesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = 
pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryBundles_FinalizedBundles_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.FinalizedBundles(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_FinalizedBundle_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.FinalizedBundle(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_FinalizedBundle_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundleRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.FinalizedBundle(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_FinalizedBundlesByHeight_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundlesByHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + 
protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := client.FinalizedBundlesByHeight(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_FinalizedBundlesByHeight_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryFinalizedBundlesByHeightRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["height"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "height") + } + + protoReq.Height, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "height", err) + } + + msg, err := server.FinalizedBundlesByHeight(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_CurrentVoteStatus_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCurrentVoteStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + msg, err := client.CurrentVoteStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_CurrentVoteStatus_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCurrentVoteStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + msg, err := server.CurrentVoteStatus(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_CanValidate_0(ctx 
context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanValidateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["valaddress"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "valaddress") + } + + protoReq.Valaddress, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "valaddress", err) + } + + msg, err := client.CanValidate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_CanValidate_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanValidateRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["valaddress"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "valaddress") + } + + protoReq.Valaddress, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "valaddress", err) + } + + msg, err := server.CanValidate(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_CanPropose_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanProposeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["proposer"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "proposer") + } + + protoReq.Proposer, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "proposer", err) + } + + val, ok = pathParams["from_index"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_index") + } + + protoReq.FromIndex, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_index", err) + } + + msg, err := client.CanPropose(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_CanPropose_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanProposeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["proposer"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "proposer") + } + + protoReq.Proposer, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "proposer", err) + } + + val, ok = pathParams["from_index"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "from_index") + } + + protoReq.FromIndex, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "from_index", err) + } + + msg, err := server.CanPropose(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryBundles_CanVote_0(ctx context.Context, marshaler runtime.Marshaler, client QueryBundlesClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanVoteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["voter"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter 
%s", "voter") + } + + protoReq.Voter, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "voter", err) + } + + val, ok = pathParams["storage_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "storage_id") + } + + protoReq.StorageId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "storage_id", err) + } + + msg, err := client.CanVote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryBundles_CanVote_0(ctx context.Context, marshaler runtime.Marshaler, server QueryBundlesServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCanVoteRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["voter"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "voter") + } + + protoReq.Voter, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "voter", err) + } + + val, ok = pathParams["storage_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "storage_id") + } + + protoReq.StorageId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "storage_id", err) + } + + msg, err := server.CanVote(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryBundlesHandlerServer registers the http handlers for service QueryBundles to "mux". +// UnaryRPC :call QueryBundlesServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryBundlesHandlerFromEndpoint instead. 
+func RegisterQueryBundlesHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryBundlesServer) error { + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_FinalizedBundles_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_FinalizedBundle_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundle_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundlesByHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_FinalizedBundlesByHeight_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundlesByHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryBundles_CurrentVoteStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_CurrentVoteStatus_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CurrentVoteStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_CanValidate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_CanValidate_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanValidate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_CanPropose_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_CanPropose_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanPropose_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryBundles_CanVote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryBundles_CanVote_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanVote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryBundlesHandlerFromEndpoint is same as RegisterQueryBundlesHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryBundlesHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryBundlesHandler(ctx, mux, conn) +} + +// RegisterQueryBundlesHandler registers the http handlers for service QueryBundles to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryBundlesHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryBundlesHandlerClient(ctx, mux, NewQueryBundlesClient(conn)) +} + +// RegisterQueryBundlesHandlerClient registers the http handlers for service QueryBundles +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryBundlesClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryBundlesClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryBundlesClient" to call the correct interceptors. 
+func RegisterQueryBundlesHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryBundlesClient) error { + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundles_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_FinalizedBundles_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundles_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundle_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_FinalizedBundle_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundle_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_FinalizedBundlesByHeight_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_FinalizedBundlesByHeight_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_FinalizedBundlesByHeight_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_CurrentVoteStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_CurrentVoteStatus_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CurrentVoteStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryBundles_CanValidate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_CanValidate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanValidate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_CanPropose_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_CanPropose_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanPropose_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryBundles_CanVote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryBundles_CanVote_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryBundles_CanVote_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_QueryBundles_FinalizedBundles_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "finalized_bundles", "pool_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_FinalizedBundle_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"kyve", "query", "v1beta1", "finalized_bundle", "pool_id", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_FinalizedBundlesByHeight_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"kyve", "query", "v1beta1", "finalized_bundle_by_height", "pool_id", "height"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_CurrentVoteStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "current_vote_status", "pool_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_CanValidate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"kyve", "query", "v1beta1", "can_validate", "pool_id", "valaddress"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_CanPropose_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6, 1, 0, 4, 1, 5, 7}, []string{"kyve", "query", "v1beta1", "can_propose", "pool_id", "staker", "proposer", "from_index"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryBundles_CanVote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5, 1, 0, 4, 1, 5, 6, 1, 0, 4, 1, 5, 7}, []string{"kyve", "query", "v1beta1", "can_vote", "pool_id", "staker", "voter", "storage_id"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryBundles_FinalizedBundles_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_FinalizedBundle_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_FinalizedBundlesByHeight_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_CurrentVoteStatus_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_CanValidate_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_CanPropose_0 = runtime.ForwardResponseMessage + + forward_QueryBundles_CanVote_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/codec.go b/x/query/types/codec.go new file mode 100644 index 00000000..b2175ba1 --- /dev/null +++ b/x/query/types/codec.go @@ -0,0 +1,11 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" +) + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/query/types/delegation.pb.go b/x/query/types/delegation.pb.go new file mode 100644 index 00000000..8873c1b4 --- /dev/null +++ b/x/query/types/delegation.pb.go @@ -0,0 +1,2382 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kyve/query/v1beta1/delegation.proto + +package types + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryDelegatorRequest is the request type for the Query/Delegator RPC method. +type QueryDelegatorRequest struct { + // staker ... + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // delegator ... + Delegator string `protobuf:"bytes,2,opt,name=delegator,proto3" json:"delegator,omitempty"` +} + +func (m *QueryDelegatorRequest) Reset() { *m = QueryDelegatorRequest{} } +func (m *QueryDelegatorRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDelegatorRequest) ProtoMessage() {} +func (*QueryDelegatorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{0} +} +func (m *QueryDelegatorRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDelegatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDelegatorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDelegatorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDelegatorRequest.Merge(m, src) +} +func (m *QueryDelegatorRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDelegatorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDelegatorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDelegatorRequest proto.InternalMessageInfo + +func (m *QueryDelegatorRequest) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *QueryDelegatorRequest) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +// QueryDelegatorResponse is the response type for the Query/Delegator RPC method. +type QueryDelegatorResponse struct { + // delegator ... 
+ Delegator *StakerDelegatorResponse `protobuf:"bytes,1,opt,name=delegator,proto3" json:"delegator,omitempty"` +} + +func (m *QueryDelegatorResponse) Reset() { *m = QueryDelegatorResponse{} } +func (m *QueryDelegatorResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDelegatorResponse) ProtoMessage() {} +func (*QueryDelegatorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{1} +} +func (m *QueryDelegatorResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDelegatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDelegatorResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDelegatorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDelegatorResponse.Merge(m, src) +} +func (m *QueryDelegatorResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDelegatorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDelegatorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDelegatorResponse proto.InternalMessageInfo + +func (m *QueryDelegatorResponse) GetDelegator() *StakerDelegatorResponse { + if m != nil { + return m.Delegator + } + return nil +} + +// StakerDelegatorResponse ... +type StakerDelegatorResponse struct { + // delegator ... + Delegator string `protobuf:"bytes,1,opt,name=delegator,proto3" json:"delegator,omitempty"` + // current_reward ... + CurrentReward uint64 `protobuf:"varint,2,opt,name=current_reward,json=currentReward,proto3" json:"current_reward,omitempty"` + // delegation_amount ... + DelegationAmount uint64 `protobuf:"varint,3,opt,name=delegation_amount,json=delegationAmount,proto3" json:"delegation_amount,omitempty"` + // staker ... 
+ Staker string `protobuf:"bytes,4,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *StakerDelegatorResponse) Reset() { *m = StakerDelegatorResponse{} } +func (m *StakerDelegatorResponse) String() string { return proto.CompactTextString(m) } +func (*StakerDelegatorResponse) ProtoMessage() {} +func (*StakerDelegatorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{2} +} +func (m *StakerDelegatorResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StakerDelegatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StakerDelegatorResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StakerDelegatorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StakerDelegatorResponse.Merge(m, src) +} +func (m *StakerDelegatorResponse) XXX_Size() int { + return m.Size() +} +func (m *StakerDelegatorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StakerDelegatorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StakerDelegatorResponse proto.InternalMessageInfo + +func (m *StakerDelegatorResponse) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +func (m *StakerDelegatorResponse) GetCurrentReward() uint64 { + if m != nil { + return m.CurrentReward + } + return 0 +} + +func (m *StakerDelegatorResponse) GetDelegationAmount() uint64 { + if m != nil { + return m.DelegationAmount + } + return 0 +} + +func (m *StakerDelegatorResponse) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +// QueryDelegatorsByStakerRequest ... +type QueryDelegatorsByStakerRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // staker ... 
Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *QueryDelegatorsByStakerRequest) Reset() { *m = QueryDelegatorsByStakerRequest{} } +func (m *QueryDelegatorsByStakerRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDelegatorsByStakerRequest) ProtoMessage() {} +func (*QueryDelegatorsByStakerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{3} +} +func (m *QueryDelegatorsByStakerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDelegatorsByStakerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDelegatorsByStakerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDelegatorsByStakerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDelegatorsByStakerRequest.Merge(m, src) +} +func (m *QueryDelegatorsByStakerRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDelegatorsByStakerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDelegatorsByStakerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDelegatorsByStakerRequest proto.InternalMessageInfo + +func (m *QueryDelegatorsByStakerRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryDelegatorsByStakerRequest) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +// QueryDelegatorsByStakerResponse ... +type QueryDelegatorsByStakerResponse struct { + // delegators ... + Delegators []StakerDelegatorResponse `protobuf:"bytes,1,rep,name=delegators,proto3" json:"delegators"` + // total_delegation ... (consider metadata object) + TotalDelegation uint64 `protobuf:"varint,2,opt,name=total_delegation,json=totalDelegation,proto3" json:"total_delegation,omitempty"` + // total_delegator_count ... + TotalDelegatorCount uint64 `protobuf:"varint,3,opt,name=total_delegator_count,json=totalDelegatorCount,proto3" json:"total_delegator_count,omitempty"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,4,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDelegatorsByStakerResponse) Reset() { *m = QueryDelegatorsByStakerResponse{} } +func (m *QueryDelegatorsByStakerResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDelegatorsByStakerResponse) ProtoMessage() {} +func (*QueryDelegatorsByStakerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{4} +} +func (m *QueryDelegatorsByStakerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDelegatorsByStakerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDelegatorsByStakerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDelegatorsByStakerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDelegatorsByStakerResponse.Merge(m, src) +} +func (m *QueryDelegatorsByStakerResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDelegatorsByStakerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDelegatorsByStakerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDelegatorsByStakerResponse proto.InternalMessageInfo + +func (m *QueryDelegatorsByStakerResponse) GetDelegators() []StakerDelegatorResponse { + if m != nil { + return m.Delegators + } + return nil +} + +func (m *QueryDelegatorsByStakerResponse) GetTotalDelegation() uint64 { + if m != nil { + return m.TotalDelegation + } + return 0 +} + +func (m *QueryDelegatorsByStakerResponse) GetTotalDelegatorCount() uint64 { + if m != nil { + return m.TotalDelegatorCount + } + return 0 +} + +func (m *QueryDelegatorsByStakerResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryStakersByDelegatorRequest ... +type QueryStakersByDelegatorRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // delegator ... 
+ Delegator string `protobuf:"bytes,2,opt,name=delegator,proto3" json:"delegator,omitempty"` +} + +func (m *QueryStakersByDelegatorRequest) Reset() { *m = QueryStakersByDelegatorRequest{} } +func (m *QueryStakersByDelegatorRequest) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByDelegatorRequest) ProtoMessage() {} +func (*QueryStakersByDelegatorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{5} +} +func (m *QueryStakersByDelegatorRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByDelegatorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByDelegatorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByDelegatorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByDelegatorRequest.Merge(m, src) +} +func (m *QueryStakersByDelegatorRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByDelegatorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByDelegatorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByDelegatorRequest proto.InternalMessageInfo + +func (m *QueryStakersByDelegatorRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryStakersByDelegatorRequest) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +// QueryStakersByDelegatorResponse ... +type QueryStakersByDelegatorResponse struct { + // delegator ... + Delegator string `protobuf:"bytes,1,opt,name=delegator,proto3" json:"delegator,omitempty"` + // stakers ... + Stakers []DelegationForStakerResponse `protobuf:"bytes,2,rep,name=stakers,proto3" json:"stakers"` + // pagination defines the pagination in the response. 
+ Pagination *query.PageResponse `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryStakersByDelegatorResponse) Reset() { *m = QueryStakersByDelegatorResponse{} } +func (m *QueryStakersByDelegatorResponse) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByDelegatorResponse) ProtoMessage() {} +func (*QueryStakersByDelegatorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{6} +} +func (m *QueryStakersByDelegatorResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByDelegatorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByDelegatorResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByDelegatorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByDelegatorResponse.Merge(m, src) +} +func (m *QueryStakersByDelegatorResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByDelegatorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByDelegatorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByDelegatorResponse proto.InternalMessageInfo + +func (m *QueryStakersByDelegatorResponse) GetDelegator() string { + if m != nil { + return m.Delegator + } + return "" +} + +func (m *QueryStakersByDelegatorResponse) GetStakers() []DelegationForStakerResponse { + if m != nil { + return m.Stakers + } + return nil +} + +func (m *QueryStakersByDelegatorResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// DelegationForStakerResponse ... +type DelegationForStakerResponse struct { + // staker ... + Staker *FullStaker `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // current_reward ... + CurrentReward uint64 `protobuf:"varint,2,opt,name=current_reward,json=currentReward,proto3" json:"current_reward,omitempty"` + // delegation_amount ... 
+ DelegationAmount uint64 `protobuf:"varint,3,opt,name=delegation_amount,json=delegationAmount,proto3" json:"delegation_amount,omitempty"` +} + +func (m *DelegationForStakerResponse) Reset() { *m = DelegationForStakerResponse{} } +func (m *DelegationForStakerResponse) String() string { return proto.CompactTextString(m) } +func (*DelegationForStakerResponse) ProtoMessage() {} +func (*DelegationForStakerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5e1c28c162a0498a, []int{7} +} +func (m *DelegationForStakerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DelegationForStakerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DelegationForStakerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DelegationForStakerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DelegationForStakerResponse.Merge(m, src) +} +func (m *DelegationForStakerResponse) XXX_Size() int { + return m.Size() +} +func (m *DelegationForStakerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DelegationForStakerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DelegationForStakerResponse proto.InternalMessageInfo + +func (m *DelegationForStakerResponse) GetStaker() *FullStaker { + if m != nil { + return m.Staker + } + return nil +} + +func (m *DelegationForStakerResponse) GetCurrentReward() uint64 { + if m != nil { + return m.CurrentReward + } + return 0 +} + +func (m *DelegationForStakerResponse) GetDelegationAmount() uint64 { + if m != nil { + return m.DelegationAmount + } + return 0 +} + +func init() { + proto.RegisterType((*QueryDelegatorRequest)(nil), "kyve.query.v1beta1.QueryDelegatorRequest") + proto.RegisterType((*QueryDelegatorResponse)(nil), "kyve.query.v1beta1.QueryDelegatorResponse") + proto.RegisterType((*StakerDelegatorResponse)(nil), "kyve.query.v1beta1.StakerDelegatorResponse") + proto.RegisterType((*QueryDelegatorsByStakerRequest)(nil), "kyve.query.v1beta1.QueryDelegatorsByStakerRequest") + proto.RegisterType((*QueryDelegatorsByStakerResponse)(nil), "kyve.query.v1beta1.QueryDelegatorsByStakerResponse") + proto.RegisterType((*QueryStakersByDelegatorRequest)(nil), "kyve.query.v1beta1.QueryStakersByDelegatorRequest") + proto.RegisterType((*QueryStakersByDelegatorResponse)(nil), "kyve.query.v1beta1.QueryStakersByDelegatorResponse") + proto.RegisterType((*DelegationForStakerResponse)(nil), "kyve.query.v1beta1.DelegationForStakerResponse") +} + +func init() { + proto.RegisterFile("kyve/query/v1beta1/delegation.proto", fileDescriptor_5e1c28c162a0498a) +} + +var fileDescriptor_5e1c28c162a0498a = []byte{ + // 699 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x6b, 0x13, 0x4f, + 0x18, 0xc7, 0x33, 0x69, 0xe8, 0x8f, 0x4e, 0xf9, 0xd9, 0x3a, 0xda, 0x1a, 0x62, 0xd9, 0x96, 0x88, + 0x7f, 0xda, 0xc2, 0x0e, 0x4d, 0x4b, 0x41, 0xf1, 0x62, 0xac, 0x15, 0x11, 0xff, 0x34, 0x82, 0xa0, + 0x97, 0x30, 0xd9, 0x0e, 0xdb, 0xd0, 0x74, 0x27, 0x9d, 0x99, 0x6d, 0x0d, 0xa5, 0x20, 0x1e, 0x3c, + 0x0b, 0x5e, 0xc5, 0x8b, 0x27, 0xf1, 0x0d, 0x08, 0xbe, 0x81, 0x1e, 0x0b, 0x5e, 0xf4, 0x22, 0xd2, + 0xf8, 0x42, 0x64, 0x67, 0x27, 0xd9, 0xd9, 0xcd, 0x26, 0x6d, 0x45, 0x6f, 0xc9, 0x33, 0xcf, 0x33, + 0xf3, 0x9d, 0xcf, 0xf7, 0x79, 0x66, 0xe1, 0xa5, 0xcd, 0xd6, 0x0e, 0xc5, 0xdb, 0x3e, 0xe5, 0x2d, + 0xbc, 0xb3, 0x50, 0xa3, 0x92, 0x2c, 0xe0, 
0x75, 0xda, 0xa0, 0x2e, 0x91, 0x75, 0xe6, 0xd9, 0x4d, + 0xce, 0x24, 0x43, 0x28, 0x48, 0xb2, 0x55, 0x92, 0xad, 0x93, 0x0a, 0x73, 0x0e, 0x13, 0x5b, 0x4c, + 0xe0, 0x1a, 0x11, 0xc9, 0xfa, 0x26, 0x71, 0xeb, 0x9e, 0x51, 0x5f, 0x38, 0xef, 0x32, 0x97, 0xa9, + 0x9f, 0x38, 0xf8, 0xa5, 0xa3, 0x53, 0x2e, 0x63, 0x6e, 0x83, 0x62, 0xd2, 0xac, 0x63, 0xe2, 0x79, + 0x4c, 0xaa, 0x12, 0xa1, 0x57, 0xad, 0x14, 0x61, 0xa1, 0x02, 0xb5, 0x5e, 0x7c, 0x00, 0x27, 0xd6, + 0x82, 0xbf, 0x2b, 0xa1, 0x58, 0xc6, 0x2b, 0x74, 0xdb, 0xa7, 0x42, 0xa2, 0x49, 0x38, 0x2c, 0x24, + 0xd9, 0xa4, 0x3c, 0x0f, 0x66, 0xc0, 0xb5, 0x91, 0x8a, 0xfe, 0x87, 0xa6, 0xe0, 0xc8, 0x7a, 0x27, + 0x37, 0x9f, 0x55, 0x4b, 0x51, 0xa0, 0xe8, 0xc0, 0xc9, 0xe4, 0x76, 0xa2, 0xc9, 0x3c, 0x41, 0xd1, + 0x3d, 0xb3, 0x2e, 0xd8, 0x72, 0xb4, 0x34, 0x6f, 0xf7, 0x02, 0xb1, 0x9f, 0xa8, 0x63, 0x7a, 0xea, + 0xcd, 0x43, 0x3e, 0x00, 0x78, 0xa1, 0x4f, 0x5a, 0x5c, 0x1e, 0x48, 0xc8, 0x43, 0x97, 0xe1, 0x19, + 0xc7, 0xe7, 0x9c, 0x7a, 0xb2, 0xca, 0xe9, 0x2e, 0xe1, 0xeb, 0xea, 0x06, 0xb9, 0xca, 0xff, 0x3a, + 0x5a, 0x51, 0x41, 0x34, 0x0f, 0xcf, 0x46, 0xe6, 0x55, 0xc9, 0x16, 0xf3, 0x3d, 0x99, 0x1f, 0x52, + 0x99, 0xe3, 0xd1, 0xc2, 0x2d, 0x15, 0x37, 0x40, 0xe5, 0x4c, 0x50, 0xc5, 0x97, 0x00, 0x5a, 0x71, + 0x16, 0xa2, 0xdc, 0x0a, 0x65, 0x77, 0x18, 0xaf, 0x42, 0x18, 0x99, 0xac, 0xa1, 0x5c, 0xb1, 0xc3, + 0x8e, 0xb0, 0x83, 0x8e, 0x48, 0xb0, 0x79, 0x4c, 0x5c, 0xaa, 0x6b, 0x2b, 0x46, 0xa5, 0x21, 0x21, + 0x1b, 0x93, 0xf0, 0x2e, 0x0b, 0xa7, 0xfb, 0x4a, 0xd0, 0xc0, 0xd6, 0x20, 0xec, 0xf2, 0x11, 0x79, + 0x30, 0x33, 0x74, 0x4a, 0x63, 0xca, 0xb9, 0x83, 0x1f, 0xd3, 0x99, 0x8a, 0xb1, 0x09, 0x9a, 0x85, + 0xe3, 0x92, 0x49, 0xd2, 0xa8, 0x46, 0xac, 0x34, 0xe7, 0x31, 0x15, 0x5f, 0xe9, 0x86, 0x51, 0x09, + 0x4e, 0xc4, 0x52, 0x19, 0xaf, 0x3a, 0x06, 0xed, 0x73, 0x66, 0x3e, 0xe3, 0xb7, 0x15, 0xf0, 0xbb, + 0x31, 0x6a, 0x39, 0x45, 0xed, 0xea, 0xb1, 0xd4, 0x74, 0x1b, 0x19, 0xa5, 0xc5, 0xd7, 0x1d, 0x87, + 0xc2, 0xab, 0x89, 0x72, 0xef, 0x14, 0xfc, 0x2d, 0x87, 0x06, 0x4f, 0xcd, 0x77, 0xa0, 0x7d, 0x4a, + 0x13, 0x72, 0xa2, 0xc6, 0x7e, 0x04, 0xff, 0x0b, 0x3d, 0x17, 0xf9, 0xac, 0xb2, 0x10, 0xa7, 0x59, + 0x18, 0x81, 0x5f, 0x65, 0x3c, 0xde, 0x07, 0xda, 0xc6, 0xce, 0x2e, 0x09, 0xc8, 0x43, 0x7f, 0x0e, + 0xf9, 0x23, 0x80, 0x17, 0x07, 0x9c, 0x8b, 0x96, 0x63, 0xef, 0xcc, 0x68, 0xc9, 0x4a, 0x13, 0xbe, + 0xea, 0x37, 0x1a, 0xba, 0xae, 0xf3, 0x0e, 0xfd, 0x83, 0x51, 0x2e, 0x7d, 0xca, 0xc1, 0x31, 0x73, + 0x5e, 0x02, 0xe7, 0xde, 0x03, 0x38, 0xd2, 0x75, 0x03, 0xcd, 0xa6, 0xa9, 0x4b, 0x7d, 0x40, 0x0b, + 0x73, 0x27, 0x49, 0x0d, 0x21, 0x14, 0x6f, 0xbc, 0xfa, 0xfa, 0xeb, 0x6d, 0x76, 0x09, 0x95, 0x70, + 0xff, 0xef, 0x08, 0xe3, 0x78, 0x2f, 0xbc, 0xfb, 0x3e, 0xde, 0xeb, 0xc6, 0xf6, 0xd1, 0x67, 0x00, + 0x51, 0xef, 0x7c, 0xa3, 0xd2, 0xf1, 0xc7, 0x27, 0xdf, 0xa3, 0xc2, 0xe2, 0xa9, 0x6a, 0xb4, 0xf6, + 0xeb, 0x4a, 0xfb, 0x22, 0x5a, 0x18, 0xa8, 0x5d, 0x54, 0x6b, 0xad, 0x6a, 0x28, 0xbf, 0x7b, 0x0d, + 0xf4, 0x05, 0x40, 0xd4, 0xdb, 0xf2, 0x03, 0xa4, 0xf7, 0x1d, 0xd4, 0x01, 0xd2, 0xfb, 0xcf, 0x54, + 0xf1, 0xa6, 0x92, 0xbe, 0x8c, 0x96, 0xd2, 0xa4, 0xeb, 0x49, 0x08, 0x74, 0x1b, 0x0e, 0x44, 0xe0, + 0xcb, 0x2b, 0x07, 0x47, 0x16, 0x38, 0x3c, 0xb2, 0xc0, 0xcf, 0x23, 0x0b, 0xbc, 0x69, 0x5b, 0x99, + 0xc3, 0xb6, 0x95, 0xf9, 0xd6, 0xb6, 0x32, 0xcf, 0xe7, 0xdc, 0xba, 0xdc, 0xf0, 0x6b, 0xb6, 0xc3, + 0xb6, 0xf0, 0xfd, 0x67, 0x4f, 0xef, 0x3c, 0xa4, 0x72, 0x97, 0xf1, 0x4d, 0xec, 0x6c, 0x90, 0xba, + 0x87, 0x5f, 0xe8, 0x83, 0x64, 0xab, 0x49, 0x45, 0x6d, 0x58, 0x7d, 0x87, 0x17, 0x7f, 0x07, 0x00, + 0x00, 0xff, 0xff, 0x39, 0x97, 0xee, 0x37, 0x42, 0x08, 0x00, 0x00, +} 
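
The service definitions that follow expose the delegation queries (Delegator, DelegatorsByStaker, StakersByDelegator) over gRPC. A minimal caller-side sketch, not part of this patch: the module import path `github.com/KYVENetwork/chain/x/query/types`, the `localhost:9090` endpoint, and the delegator address are placeholders/assumptions; `*grpc.ClientConn` satisfies the connection interface that `NewQueryDelegationClient` expects, and pagination uses the Cosmos SDK `query.PageRequest` type referenced by the generated messages.

```go
// Sketch only: query all stakers a delegator has delegated to via the
// generated QueryDelegation gRPC client. Import path, endpoint, and the
// delegator address below are assumptions, not taken from this patch.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	querytypes "github.com/KYVENetwork/chain/x/query/types" // assumed module path
)

func main() {
	// Dial the node's gRPC endpoint (9090 is the conventional Cosmos gRPC port).
	conn, err := grpc.Dial("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := querytypes.NewQueryDelegationClient(conn)

	// Ask for the first page of stakers this (placeholder) delegator backs.
	res, err := client.StakersByDelegator(context.Background(),
		&querytypes.QueryStakersByDelegatorRequest{
			Delegator:  "kyve1...", // placeholder bech32 address
			Pagination: &query.PageRequest{Limit: 10},
		})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range res.Stakers {
		// Each entry carries the full staker plus this delegator's amount and pending reward.
		fmt.Println(s.DelegationAmount, s.CurrentReward)
	}
}
```

The same three RPCs are also reachable over REST through the grpc-gateway handlers added later in this patch (`delegation.pb.gw.go`): they are registered as GET routes whose path parameters carry `staker`/`delegator` and whose query string carries the pagination fields.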
+ +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryDelegationClient is the client API for QueryDelegation service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryDelegationClient interface { + // Delegator returns delegation information for a specific delegator of a specific staker. + Delegator(ctx context.Context, in *QueryDelegatorRequest, opts ...grpc.CallOption) (*QueryDelegatorResponse, error) + // DelegatorsByStaker returns all delegators that have delegated to the given staker + // This query is paginated. + DelegatorsByStaker(ctx context.Context, in *QueryDelegatorsByStakerRequest, opts ...grpc.CallOption) (*QueryDelegatorsByStakerResponse, error) + // StakersByPoolAndDelegator returns all stakers the given delegator has delegated to. + // This query is paginated. + StakersByDelegator(ctx context.Context, in *QueryStakersByDelegatorRequest, opts ...grpc.CallOption) (*QueryStakersByDelegatorResponse, error) +} + +type queryDelegationClient struct { + cc grpc1.ClientConn +} + +func NewQueryDelegationClient(cc grpc1.ClientConn) QueryDelegationClient { + return &queryDelegationClient{cc} +} + +func (c *queryDelegationClient) Delegator(ctx context.Context, in *QueryDelegatorRequest, opts ...grpc.CallOption) (*QueryDelegatorResponse, error) { + out := new(QueryDelegatorResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryDelegation/Delegator", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryDelegationClient) DelegatorsByStaker(ctx context.Context, in *QueryDelegatorsByStakerRequest, opts ...grpc.CallOption) (*QueryDelegatorsByStakerResponse, error) { + out := new(QueryDelegatorsByStakerResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryDelegation/DelegatorsByStaker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryDelegationClient) StakersByDelegator(ctx context.Context, in *QueryStakersByDelegatorRequest, opts ...grpc.CallOption) (*QueryStakersByDelegatorResponse, error) { + out := new(QueryStakersByDelegatorResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryDelegation/StakersByDelegator", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryDelegationServer is the server API for QueryDelegation service. +type QueryDelegationServer interface { + // Delegator returns delegation information for a specific delegator of a specific staker. + Delegator(context.Context, *QueryDelegatorRequest) (*QueryDelegatorResponse, error) + // DelegatorsByStaker returns all delegators that have delegated to the given staker + // This query is paginated. + DelegatorsByStaker(context.Context, *QueryDelegatorsByStakerRequest) (*QueryDelegatorsByStakerResponse, error) + // StakersByPoolAndDelegator returns all stakers the given delegator has delegated to. + // This query is paginated. + StakersByDelegator(context.Context, *QueryStakersByDelegatorRequest) (*QueryStakersByDelegatorResponse, error) +} + +// UnimplementedQueryDelegationServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryDelegationServer struct { +} + +func (*UnimplementedQueryDelegationServer) Delegator(ctx context.Context, req *QueryDelegatorRequest) (*QueryDelegatorResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delegator not implemented") +} +func (*UnimplementedQueryDelegationServer) DelegatorsByStaker(ctx context.Context, req *QueryDelegatorsByStakerRequest) (*QueryDelegatorsByStakerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DelegatorsByStaker not implemented") +} +func (*UnimplementedQueryDelegationServer) StakersByDelegator(ctx context.Context, req *QueryStakersByDelegatorRequest) (*QueryStakersByDelegatorResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StakersByDelegator not implemented") +} + +func RegisterQueryDelegationServer(s grpc1.Server, srv QueryDelegationServer) { + s.RegisterService(&_QueryDelegation_serviceDesc, srv) +} + +func _QueryDelegation_Delegator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDelegatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryDelegationServer).Delegator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryDelegation/Delegator", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryDelegationServer).Delegator(ctx, req.(*QueryDelegatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryDelegation_DelegatorsByStaker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDelegatorsByStakerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryDelegationServer).DelegatorsByStaker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryDelegation/DelegatorsByStaker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryDelegationServer).DelegatorsByStaker(ctx, req.(*QueryDelegatorsByStakerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryDelegation_StakersByDelegator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryStakersByDelegatorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryDelegationServer).StakersByDelegator(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryDelegation/StakersByDelegator", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryDelegationServer).StakersByDelegator(ctx, req.(*QueryStakersByDelegatorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryDelegation_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryDelegation", + HandlerType: (*QueryDelegationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Delegator", + Handler: _QueryDelegation_Delegator_Handler, + }, + { + MethodName: "DelegatorsByStaker", + Handler: _QueryDelegation_DelegatorsByStaker_Handler, + }, + { + MethodName: "StakersByDelegator", + Handler: 
_QueryDelegation_StakersByDelegator_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/delegation.proto", +} + +func (m *QueryDelegatorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDelegatorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDelegatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0x12 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDelegatorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDelegatorResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDelegatorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Delegator != nil { + { + size, err := m.Delegator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StakerDelegatorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StakerDelegatorResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StakerDelegatorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x22 + } + if m.DelegationAmount != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.DelegationAmount)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentReward != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.CurrentReward)) + i-- + dAtA[i] = 0x10 + } + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDelegatorsByStakerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDelegatorsByStakerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDelegatorsByStakerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Staker))) + i-- + 
dAtA[i] = 0x12 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryDelegatorsByStakerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDelegatorsByStakerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDelegatorsByStakerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TotalDelegatorCount != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.TotalDelegatorCount)) + i-- + dAtA[i] = 0x18 + } + if m.TotalDelegation != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.TotalDelegation)) + i-- + dAtA[i] = 0x10 + } + if len(m.Delegators) > 0 { + for iNdEx := len(m.Delegators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Delegators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersByDelegatorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByDelegatorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByDelegatorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0x12 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersByDelegatorResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByDelegatorResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByDelegatorResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Stakers) > 0 { + for iNdEx := len(m.Stakers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stakers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Delegator) > 0 { + i -= len(m.Delegator) + copy(dAtA[i:], m.Delegator) + i = encodeVarintDelegation(dAtA, i, uint64(len(m.Delegator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DelegationForStakerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DelegationForStakerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DelegationForStakerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DelegationAmount != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.DelegationAmount)) + i-- + dAtA[i] = 0x18 + } + if m.CurrentReward != 0 { + i = encodeVarintDelegation(dAtA, i, uint64(m.CurrentReward)) + i-- + dAtA[i] = 0x10 + } + if m.Staker != nil { + { + size, err := m.Staker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDelegation(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDelegation(dAtA []byte, offset int, v uint64) int { + offset -= sovDelegation(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryDelegatorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *QueryDelegatorResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Delegator != nil { + l = m.Delegator.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *StakerDelegatorResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if m.CurrentReward != 0 { + n += 1 + sovDelegation(uint64(m.CurrentReward)) + } + if m.DelegationAmount != 0 { + n += 1 + sovDelegation(uint64(m.DelegationAmount)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *QueryDelegatorsByStakerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *QueryDelegatorsByStakerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Delegators) > 0 { + for _, e := range m.Delegators { + l = e.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + } + if m.TotalDelegation != 0 { + n += 1 + sovDelegation(uint64(m.TotalDelegation)) + } + if m.TotalDelegatorCount != 0 { + n += 1 + sovDelegation(uint64(m.TotalDelegatorCount)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *QueryStakersByDelegatorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + 
return n +} + +func (m *QueryStakersByDelegatorResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Delegator) + if l > 0 { + n += 1 + l + sovDelegation(uint64(l)) + } + if len(m.Stakers) > 0 { + for _, e := range m.Stakers { + l = e.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + return n +} + +func (m *DelegationForStakerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Staker != nil { + l = m.Staker.Size() + n += 1 + l + sovDelegation(uint64(l)) + } + if m.CurrentReward != 0 { + n += 1 + sovDelegation(uint64(m.CurrentReward)) + } + if m.DelegationAmount != 0 { + n += 1 + sovDelegation(uint64(m.DelegationAmount)) + } + return n +} + +func sovDelegation(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDelegation(x uint64) (n int) { + return sovDelegation(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryDelegatorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDelegatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDelegatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDelegatorResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDelegatorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDelegatorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delegator == nil { + m.Delegator = &StakerDelegatorResponse{} + } + if err := m.Delegator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StakerDelegatorResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StakerDelegatorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StakerDelegatorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReward", wireType) + } + m.CurrentReward = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReward |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationAmount", wireType) + } + m.DelegationAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelegationAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDelegatorsByStakerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDelegatorsByStakerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDelegatorsByStakerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDelegatorsByStakerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDelegatorsByStakerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDelegatorsByStakerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegators = append(m.Delegators, StakerDelegatorResponse{}) + if err := m.Delegators[len(m.Delegators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegation", wireType) + } + m.TotalDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegatorCount", wireType) + } + m.TotalDelegatorCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDelegatorCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByDelegatorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByDelegatorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByDelegatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByDelegatorResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByDelegatorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByDelegatorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delegator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Delegator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stakers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stakers = append(m.Stakers, DelegationForStakerResponse{}) + if err := m.Stakers[len(m.Stakers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DelegationForStakerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DelegationForStakerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DelegationForStakerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDelegation + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDelegation + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Staker == nil { + m.Staker = &FullStaker{} + } + if err := m.Staker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReward", wireType) + } + m.CurrentReward = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReward |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationAmount", wireType) + } + m.DelegationAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDelegation + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelegationAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDelegation(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDelegation + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDelegation(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDelegation + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDelegation + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDelegation + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDelegation + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDelegation = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDelegation = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDelegation = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/delegation.pb.gw.go b/x/query/types/delegation.pb.gw.go new file mode 100644 index 00000000..11579c52 --- /dev/null +++ b/x/query/types/delegation.pb.gw.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
+// source: kyve/query/v1beta1/delegation.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_QueryDelegation_Delegator_0(ctx context.Context, marshaler runtime.Marshaler, client QueryDelegationClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDelegatorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["delegator"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "delegator") + } + + protoReq.Delegator, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "delegator", err) + } + + msg, err := client.Delegator(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryDelegation_Delegator_0(ctx context.Context, marshaler runtime.Marshaler, server QueryDelegationServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDelegatorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + val, ok = pathParams["delegator"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "delegator") + } + + protoReq.Delegator, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "delegator", err) + } + + msg, err := server.Delegator(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_QueryDelegation_DelegatorsByStaker_0 = &utilities.DoubleArray{Encoding: map[string]int{"staker": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_QueryDelegation_DelegatorsByStaker_0(ctx context.Context, marshaler runtime.Marshaler, client QueryDelegationClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDelegatorsByStakerRequest + var metadata 
runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryDelegation_DelegatorsByStaker_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DelegatorsByStaker(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryDelegation_DelegatorsByStaker_0(ctx context.Context, marshaler runtime.Marshaler, server QueryDelegationServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDelegatorsByStakerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["staker"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "staker") + } + + protoReq.Staker, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "staker", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryDelegation_DelegatorsByStaker_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.DelegatorsByStaker(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_QueryDelegation_StakersByDelegator_0 = &utilities.DoubleArray{Encoding: map[string]int{"delegator": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_QueryDelegation_StakersByDelegator_0(ctx context.Context, marshaler runtime.Marshaler, client QueryDelegationClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByDelegatorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["delegator"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "delegator") + } + + protoReq.Delegator, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "delegator", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryDelegation_StakersByDelegator_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.StakersByDelegator(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryDelegation_StakersByDelegator_0(ctx context.Context, marshaler runtime.Marshaler, server QueryDelegationServer, req *http.Request, 
pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByDelegatorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["delegator"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "delegator") + } + + protoReq.Delegator, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "delegator", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryDelegation_StakersByDelegator_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.StakersByDelegator(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryDelegationHandlerServer registers the http handlers for service QueryDelegation to "mux". +// UnaryRPC :call QueryDelegationServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryDelegationHandlerFromEndpoint instead. +func RegisterQueryDelegationHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryDelegationServer) error { + + mux.Handle("GET", pattern_QueryDelegation_Delegator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryDelegation_Delegator_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_Delegator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryDelegation_DelegatorsByStaker_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryDelegation_DelegatorsByStaker_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_DelegatorsByStaker_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryDelegation_StakersByDelegator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryDelegation_StakersByDelegator_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_StakersByDelegator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryDelegationHandlerFromEndpoint is same as RegisterQueryDelegationHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryDelegationHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryDelegationHandler(ctx, mux, conn) +} + +// RegisterQueryDelegationHandler registers the http handlers for service QueryDelegation to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryDelegationHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryDelegationHandlerClient(ctx, mux, NewQueryDelegationClient(conn)) +} + +// RegisterQueryDelegationHandlerClient registers the http handlers for service QueryDelegation +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryDelegationClient". 
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryDelegationClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryDelegationClient" to call the correct interceptors. +func RegisterQueryDelegationHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryDelegationClient) error { + + mux.Handle("GET", pattern_QueryDelegation_Delegator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryDelegation_Delegator_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_Delegator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryDelegation_DelegatorsByStaker_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryDelegation_DelegatorsByStaker_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_DelegatorsByStaker_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryDelegation_StakersByDelegator_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryDelegation_StakersByDelegator_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryDelegation_StakersByDelegator_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_QueryDelegation_Delegator_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 3}, []string{"kyve", "query", "v1beta1", "delegator", "staker"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryDelegation_DelegatorsByStaker_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "delegators_by_staker", "staker"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryDelegation_StakersByDelegator_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "stakers_by_delegator", "delegator"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryDelegation_Delegator_0 = runtime.ForwardResponseMessage + + forward_QueryDelegation_DelegatorsByStaker_0 = runtime.ForwardResponseMessage + + forward_QueryDelegation_StakersByDelegator_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/expected_keepers.go b/x/query/types/expected_keepers.go new file mode 100644 index 00000000..6aa6e977 --- /dev/null +++ b/x/query/types/expected_keepers.go @@ -0,0 +1,18 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetAccount(ctx sdk.Context, addr sdk.AccAddress) types.AccountI + // Methods imported from account should be defined here +} + +// BankKeeper defines the expected interface needed to retrieve account balances. +type BankKeeper interface { + SpendableCoins(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins + // Methods imported from bank should be defined here +} diff --git a/x/query/types/keys.go b/x/query/types/keys.go new file mode 100644 index 00000000..7237b127 --- /dev/null +++ b/x/query/types/keys.go @@ -0,0 +1,19 @@ +package types + +const ( + // ModuleName defines the module name + ModuleName = "query" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_query" +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} diff --git a/x/query/types/params.pb.go b/x/query/types/params.pb.go new file mode 100644 index 00000000..875a8a42 --- /dev/null +++ b/x/query/types/params.pb.go @@ -0,0 +1,799 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/params.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/KYVENetwork/chain/x/bundles/types" + types1 "github.com/KYVENetwork/chain/x/delegation/types" + types2 "github.com/KYVENetwork/chain/x/global/types" + types3 "github.com/KYVENetwork/chain/x/stakers/types" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest ... +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b5269c0a69f1d3d4, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse ... +type QueryParamsResponse struct { + // bundles_params ... + BundlesParams *types.Params `protobuf:"bytes,1,opt,name=bundles_params,json=bundlesParams,proto3" json:"bundles_params,omitempty"` + // delegation_params ... + DelegationParams *types1.Params `protobuf:"bytes,2,opt,name=delegation_params,json=delegationParams,proto3" json:"delegation_params,omitempty"` + // global_params ... + GlobalParams *types2.Params `protobuf:"bytes,3,opt,name=global_params,json=globalParams,proto3" json:"global_params,omitempty"` + // gov_params ... + GovParams *v1.QueryParamsResponse `protobuf:"bytes,4,opt,name=gov_params,json=govParams,proto3" json:"gov_params,omitempty"` + // stakers_params ... 
+ StakersParams *types3.Params `protobuf:"bytes,5,opt,name=stakers_params,json=stakersParams,proto3" json:"stakers_params,omitempty"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b5269c0a69f1d3d4, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetBundlesParams() *types.Params { + if m != nil { + return m.BundlesParams + } + return nil +} + +func (m *QueryParamsResponse) GetDelegationParams() *types1.Params { + if m != nil { + return m.DelegationParams + } + return nil +} + +func (m *QueryParamsResponse) GetGlobalParams() *types2.Params { + if m != nil { + return m.GlobalParams + } + return nil +} + +func (m *QueryParamsResponse) GetGovParams() *v1.QueryParamsResponse { + if m != nil { + return m.GovParams + } + return nil +} + +func (m *QueryParamsResponse) GetStakersParams() *types3.Params { + if m != nil { + return m.StakersParams + } + return nil +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "kyve.query.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "kyve.query.v1beta1.QueryParamsResponse") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/params.proto", fileDescriptor_b5269c0a69f1d3d4) } + +var fileDescriptor_b5269c0a69f1d3d4 = []byte{ + // 427 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xbf, 0x8e, 0xd3, 0x40, + 0x10, 0xc6, 0xe3, 0x00, 0x91, 0xd8, 0x90, 0x08, 0x0c, 0x05, 0x98, 0xc8, 0x09, 0x16, 0x02, 0x44, + 0xb1, 0x2b, 0x87, 0x17, 0x00, 0x02, 0x15, 0x08, 0x41, 0x0a, 0x24, 0x68, 0xd0, 0x3a, 0x59, 0x6d, + 0xac, 0x38, 0x1e, 0xc7, 0xbb, 0x5e, 0x48, 0x41, 0x43, 0x4d, 0x81, 0xc4, 0x4b, 0x51, 0x46, 0xa2, + 0xa1, 0x44, 0xc9, 0x3d, 0xc1, 0x3d, 0xc1, 0x29, 0xbb, 0xeb, 0xcb, 0x3f, 0x9f, 0xae, 0xf3, 0xcc, + 0x7c, 0xf3, 0x9b, 0xf1, 0xb7, 0x83, 0xba, 0xd3, 0x85, 0x62, 0x64, 0x5e, 0xb0, 0x7c, 0x41, 0x54, + 0x18, 0x31, 0x49, 0x43, 0x92, 0xd1, 0x9c, 0xce, 0x04, 0xce, 0x72, 0x90, 0xe0, 0xba, 0x1b, 0x01, + 0xd6, 0x02, 0x6c, 0x05, 0xde, 0xbd, 0x11, 0x88, 0x19, 0x08, 0xc2, 0x41, 0x11, 0x15, 0x9a, 0x6e, + 0x23, 0xf7, 0x3a, 0x1c, 0x80, 0x27, 0x8c, 0xd0, 0x2c, 0x26, 0x34, 0x4d, 0x41, 0x52, 0x19, 0x43, + 0x6a, 0x61, 0xde, 0x03, 0x3d, 0x2d, 0x2a, 0xd2, 0x71, 0xc2, 0x44, 0xe5, 0x3c, 0xef, 0xa1, 0x96, + 0x8c, 0x59, 0xc2, 0xb8, 0x6e, 0xad, 0x56, 0xf5, 0xb4, 0x8a, 0x27, 0x10, 0xd1, 0xe4, 0x5c, 0x61, + 0xc2, 0xbd, 0x51, 0x42, 0xd2, 0x29, 0xcb, 0xab, 0x47, 0x05, 0x77, 0x90, 0xfb, 0x61, 0xb3, 0xfa, + 0x7b, 0x9d, 0x1c, 0xb2, 0x79, 0xc1, 0x84, 0x0c, 0x4e, 0xeb, 0xe8, 0xf6, 0x5e, 0x5a, 
0x64, 0x90, + 0x0a, 0xe6, 0x0e, 0x50, 0xdb, 0x2e, 0xfe, 0xc5, 0x50, 0xee, 0x3a, 0x3d, 0xe7, 0x49, 0xb3, 0xdf, + 0xc1, 0xda, 0x21, 0x5b, 0x2b, 0x3d, 0xc2, 0xb6, 0xbb, 0x65, 0xf3, 0x26, 0x74, 0xdf, 0xa2, 0x5b, + 0xdb, 0x5f, 0x2b, 0x39, 0x75, 0xcd, 0xe9, 0x1a, 0xce, 0xb6, 0x7c, 0x88, 0xba, 0xb9, 0x2d, 0x59, + 0xda, 0x73, 0xd4, 0x32, 0xff, 0x5c, 0x92, 0xae, 0x68, 0xd2, 0x7d, 0x43, 0xb2, 0x76, 0x1c, 0x50, + 0x6e, 0x98, 0xb4, 0x25, 0xbc, 0x40, 0x88, 0x83, 0x2a, 0xdb, 0xaf, 0xea, 0xf6, 0x00, 0x9b, 0xe7, + 0xc5, 0x1c, 0x14, 0x56, 0x21, 0xae, 0x30, 0x63, 0x78, 0x9d, 0x83, 0xb2, 0x88, 0x01, 0x6a, 0x5b, + 0x97, 0x4b, 0xcc, 0xb5, 0x5d, 0x5f, 0x6c, 0xed, 0xc8, 0x17, 0x9b, 0x37, 0x61, 0xff, 0xa7, 0x83, + 0x9a, 0x3b, 0x73, 0xdc, 0xef, 0xa8, 0x61, 0xbf, 0x1e, 0xe1, 0xe3, 0x03, 0xc4, 0xc7, 0xcf, 0xe6, + 0x3d, 0xbe, 0x54, 0x67, 0x56, 0x0f, 0x82, 0x1f, 0x7f, 0x4f, 0x7e, 0xd7, 0x3b, 0xae, 0x47, 0x2e, + 0x3c, 0xfd, 0x97, 0xaf, 0xfe, 0xac, 0x7c, 0x67, 0xb9, 0xf2, 0x9d, 0xff, 0x2b, 0xdf, 0xf9, 0xb5, + 0xf6, 0x6b, 0xcb, 0xb5, 0x5f, 0xfb, 0xb7, 0xf6, 0x6b, 0x9f, 0x9f, 0xf2, 0x58, 0x4e, 0x8a, 0x08, + 0x8f, 0x60, 0x46, 0xde, 0x7c, 0xfa, 0xf8, 0xfa, 0x1d, 0x93, 0x5f, 0x21, 0x9f, 0x92, 0xd1, 0x84, + 0xc6, 0x29, 0xf9, 0x66, 0x71, 0x72, 0x91, 0x31, 0x11, 0x35, 0xf4, 0x99, 0x3d, 0x3b, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x49, 0x98, 0xf2, 0x4c, 0x64, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryParamsClient is the client API for QueryParams service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryParamsClient interface { + // Pools queries for all pools. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryParamsClient struct { + cc grpc1.ClientConn +} + +func NewQueryParamsClient(cc grpc1.ClientConn) QueryParamsClient { + return &queryParamsClient{cc} +} + +func (c *queryParamsClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryParams/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryParamsServer is the server API for QueryParams service. +type QueryParamsServer interface { + // Pools queries for all pools. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryParamsServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryParamsServer struct { +} + +func (*UnimplementedQueryParamsServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryParamsServer(s grpc1.Server, srv QueryParamsServer) { + s.RegisterService(&_QueryParams_serviceDesc, srv) +} + +func _QueryParams_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryParamsServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryParams/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryParamsServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryParams_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryParams", + HandlerType: (*QueryParamsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _QueryParams_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/params.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StakersParams != nil { + { + size, err := m.StakersParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.GovParams != nil { + { + size, err := m.GovParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GlobalParams != nil { + { + size, err := m.GlobalParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DelegationParams != nil { + { + size, err := m.DelegationParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.BundlesParams != nil { + { + size, err := m.BundlesParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BundlesParams != nil { + l = m.BundlesParams.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.DelegationParams != nil { + l = m.DelegationParams.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.GlobalParams != nil { + l = m.GlobalParams.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.GovParams != nil { + l = m.GovParams.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.StakersParams != nil { + l = m.StakersParams.Size() + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundlesParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BundlesParams == nil { + m.BundlesParams = &types.Params{} + } + if err := 
m.BundlesParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegationParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DelegationParams == nil { + m.DelegationParams = &types1.Params{} + } + if err := m.DelegationParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GlobalParams == nil { + m.GlobalParams = &types2.Params{} + } + if err := m.GlobalParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GovParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GovParams == nil { + m.GovParams = &v1.QueryParamsResponse{} + } + if err := m.GovParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StakersParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StakersParams == nil { + m.StakersParams = &types3.Params{} + } + if err := m.StakersParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l 
{ + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/params.pb.gw.go b/x/query/types/params.pb.gw.go new file mode 100644 index 00000000..978da8ab --- /dev/null +++ b/x/query/types/params.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/query/v1beta1/params.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_QueryParams_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryParamsClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryParams_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryParamsServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryParamsHandlerServer registers the http handlers for service QueryParams to "mux". +// UnaryRPC :call QueryParamsServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryParamsHandlerFromEndpoint instead. +func RegisterQueryParamsHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryParamsServer) error { + + mux.Handle("GET", pattern_QueryParams_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryParams_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryParams_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryParamsHandlerFromEndpoint is same as RegisterQueryParamsHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryParamsHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryParamsHandler(ctx, mux, conn) +} + +// RegisterQueryParamsHandler registers the http handlers for service QueryParams to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryParamsHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryParamsHandlerClient(ctx, mux, NewQueryParamsClient(conn)) +} + +// RegisterQueryParamsHandlerClient registers the http handlers for service QueryParams +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryParamsClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryParamsClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryParamsClient" to call the correct interceptors. 
+func RegisterQueryParamsHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryParamsClient) error { + + mux.Handle("GET", pattern_QueryParams_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryParams_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryParams_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_QueryParams_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "query", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryParams_Params_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/pools.pb.go b/x/query/types/pools.pb.go new file mode 100644 index 00000000..636be4ad --- /dev/null +++ b/x/query/types/pools.pb.go @@ -0,0 +1,1685 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/pools.proto + +package types + +import ( + context "context" + fmt "fmt" + types1 "github.com/KYVENetwork/chain/x/bundles/types" + types "github.com/KYVENetwork/chain/x/pool/types" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryPoolsRequest is the request type for the Query/Pools RPC method. +type QueryPoolsRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // search ... + Search string `protobuf:"bytes,2,opt,name=search,proto3" json:"search,omitempty"` + // runtime ... + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + // disabled ... + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + // storage_provider_id ... 
+ StorageProviderId uint32 `protobuf:"varint,5,opt,name=storage_provider_id,json=storageProviderId,proto3" json:"storage_provider_id,omitempty"` +} + +func (m *QueryPoolsRequest) Reset() { *m = QueryPoolsRequest{} } +func (m *QueryPoolsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPoolsRequest) ProtoMessage() {} +func (*QueryPoolsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b627739c2d7723dc, []int{0} +} +func (m *QueryPoolsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPoolsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPoolsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPoolsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPoolsRequest.Merge(m, src) +} +func (m *QueryPoolsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPoolsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPoolsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPoolsRequest proto.InternalMessageInfo + +func (m *QueryPoolsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryPoolsRequest) GetSearch() string { + if m != nil { + return m.Search + } + return "" +} + +func (m *QueryPoolsRequest) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *QueryPoolsRequest) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *QueryPoolsRequest) GetStorageProviderId() uint32 { + if m != nil { + return m.StorageProviderId + } + return 0 +} + +// QueryPoolsResponse is the response type for the Query/Pools RPC method. +type QueryPoolsResponse struct { + // pools ... + Pools []PoolResponse `protobuf:"bytes,1,rep,name=pools,proto3" json:"pools"` + // pagination defines the pagination in the response. + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPoolsResponse) Reset() { *m = QueryPoolsResponse{} } +func (m *QueryPoolsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPoolsResponse) ProtoMessage() {} +func (*QueryPoolsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b627739c2d7723dc, []int{1} +} +func (m *QueryPoolsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPoolsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPoolsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPoolsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPoolsResponse.Merge(m, src) +} +func (m *QueryPoolsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPoolsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPoolsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPoolsResponse proto.InternalMessageInfo + +func (m *QueryPoolsResponse) GetPools() []PoolResponse { + if m != nil { + return m.Pools + } + return nil +} + +func (m *QueryPoolsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// PoolResponse ... +type PoolResponse struct { + // id ... 
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // data ... + Data *types.Pool `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // bundle_proposal ... + BundleProposal *types1.BundleProposal `protobuf:"bytes,3,opt,name=bundle_proposal,json=bundleProposal,proto3" json:"bundle_proposal,omitempty"` + // stakers ... + Stakers []string `protobuf:"bytes,4,rep,name=stakers,proto3" json:"stakers,omitempty"` + // total_stake ... + TotalSelfDelegation uint64 `protobuf:"varint,5,opt,name=total_self_delegation,json=totalSelfDelegation,proto3" json:"total_self_delegation,omitempty"` + // total_delegation ... + TotalDelegation uint64 `protobuf:"varint,6,opt,name=total_delegation,json=totalDelegation,proto3" json:"total_delegation,omitempty"` + // status ... + Status types.PoolStatus `protobuf:"varint,7,opt,name=status,proto3,enum=kyve.pool.v1beta1.PoolStatus" json:"status,omitempty"` +} + +func (m *PoolResponse) Reset() { *m = PoolResponse{} } +func (m *PoolResponse) String() string { return proto.CompactTextString(m) } +func (*PoolResponse) ProtoMessage() {} +func (*PoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b627739c2d7723dc, []int{2} +} +func (m *PoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PoolResponse.Merge(m, src) +} +func (m *PoolResponse) XXX_Size() int { + return m.Size() +} +func (m *PoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PoolResponse proto.InternalMessageInfo + +func (m *PoolResponse) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *PoolResponse) GetData() *types.Pool { + if m != nil { + return m.Data + } + return nil +} + +func (m *PoolResponse) GetBundleProposal() *types1.BundleProposal { + if m != nil { + return m.BundleProposal + } + return nil +} + +func (m *PoolResponse) GetStakers() []string { + if m != nil { + return m.Stakers + } + return nil +} + +func (m *PoolResponse) GetTotalSelfDelegation() uint64 { + if m != nil { + return m.TotalSelfDelegation + } + return 0 +} + +func (m *PoolResponse) GetTotalDelegation() uint64 { + if m != nil { + return m.TotalDelegation + } + return 0 +} + +func (m *PoolResponse) GetStatus() types.PoolStatus { + if m != nil { + return m.Status + } + return types.POOL_STATUS_UNSPECIFIED +} + +// QueryPoolRequest is the request type for the Query/Pool RPC method. +type QueryPoolRequest struct { + // id defines the unique ID of the pool. 
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryPoolRequest) Reset() { *m = QueryPoolRequest{} } +func (m *QueryPoolRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPoolRequest) ProtoMessage() {} +func (*QueryPoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b627739c2d7723dc, []int{3} +} +func (m *QueryPoolRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPoolRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPoolRequest.Merge(m, src) +} +func (m *QueryPoolRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPoolRequest proto.InternalMessageInfo + +func (m *QueryPoolRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryPoolResponse is the response type for the Query/Pool RPC method. +type QueryPoolResponse struct { + // pool ... + Pool PoolResponse `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool"` +} + +func (m *QueryPoolResponse) Reset() { *m = QueryPoolResponse{} } +func (m *QueryPoolResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPoolResponse) ProtoMessage() {} +func (*QueryPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b627739c2d7723dc, []int{4} +} +func (m *QueryPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPoolResponse.Merge(m, src) +} +func (m *QueryPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPoolResponse proto.InternalMessageInfo + +func (m *QueryPoolResponse) GetPool() PoolResponse { + if m != nil { + return m.Pool + } + return PoolResponse{} +} + +func init() { + proto.RegisterType((*QueryPoolsRequest)(nil), "kyve.query.v1beta1.QueryPoolsRequest") + proto.RegisterType((*QueryPoolsResponse)(nil), "kyve.query.v1beta1.QueryPoolsResponse") + proto.RegisterType((*PoolResponse)(nil), "kyve.query.v1beta1.PoolResponse") + proto.RegisterType((*QueryPoolRequest)(nil), "kyve.query.v1beta1.QueryPoolRequest") + proto.RegisterType((*QueryPoolResponse)(nil), "kyve.query.v1beta1.QueryPoolResponse") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/pools.proto", fileDescriptor_b627739c2d7723dc) } + +var fileDescriptor_b627739c2d7723dc = []byte{ + // 659 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0xde, 0x96, 0xee, 0x02, 0x83, 0xf2, 0x63, 0xf0, 0x47, 0x5d, 0xa5, 0xd4, 0x06, 0x70, 0xc5, + 0xa4, 0x13, 0xd6, 0x78, 0x31, 0x9e, 0x08, 0x6a, 0x8c, 0x51, 0xd7, 0x92, 0x98, 0xe8, 0x65, 
0x33, + 0xdd, 0x0e, 0x65, 0x42, 0xe9, 0x94, 0xce, 0x2c, 0xba, 0x1a, 0x2f, 0xfc, 0x05, 0x26, 0x1e, 0xfd, + 0x87, 0x88, 0x27, 0x12, 0x2f, 0x7a, 0x31, 0x06, 0xfc, 0x43, 0x4c, 0xa7, 0xd3, 0xda, 0x15, 0x71, + 0xbd, 0xf5, 0xf5, 0x7d, 0xdf, 0x7b, 0xdf, 0xfb, 0xde, 0x6b, 0x81, 0xb5, 0x33, 0xd8, 0x27, 0x68, + 0xaf, 0x4f, 0xd2, 0x01, 0xda, 0x5f, 0xf3, 0x89, 0xc0, 0x6b, 0x28, 0x61, 0x2c, 0xe2, 0x6e, 0x92, + 0x32, 0xc1, 0x20, 0xcc, 0xf2, 0xae, 0xcc, 0xbb, 0x2a, 0xdf, 0x5c, 0xed, 0x31, 0xbe, 0xcb, 0x38, + 0xf2, 0x31, 0x3f, 0x45, 0xc5, 0x21, 0x8d, 0xb1, 0xa0, 0x2c, 0xce, 0xf9, 0xcd, 0x0b, 0x21, 0x0b, + 0x99, 0x7c, 0x44, 0xd9, 0x93, 0x7a, 0x7b, 0x2d, 0x64, 0x2c, 0x8c, 0x08, 0xc2, 0x09, 0x45, 0x38, + 0x8e, 0x99, 0x90, 0x14, 0xd5, 0xb3, 0xe9, 0x48, 0x4d, 0x7e, 0x3f, 0x0e, 0x22, 0xc2, 0xcb, 0xd2, + 0x2a, 0x2e, 0x2a, 0x48, 0x4c, 0xa6, 0x74, 0x48, 0x76, 0x9e, 0x75, 0xbe, 0x69, 0x60, 0xee, 0x79, + 0x26, 0xac, 0x93, 0x8d, 0xe2, 0x91, 0xbd, 0x3e, 0xe1, 0x02, 0x3e, 0x00, 0xe0, 0xb7, 0x3e, 0x53, + 0xb3, 0xb5, 0xd6, 0x54, 0x7b, 0xc5, 0xcd, 0x87, 0x71, 0xb3, 0x61, 0x86, 0xe7, 0x74, 0x3b, 0x38, + 0x24, 0x8a, 0xeb, 0x55, 0x98, 0xf0, 0x12, 0x68, 0x70, 0x82, 0xd3, 0xde, 0xb6, 0xa9, 0xdb, 0x5a, + 0x6b, 0xd2, 0x53, 0x11, 0x34, 0xc1, 0x78, 0xda, 0x8f, 0x05, 0xdd, 0x25, 0xe6, 0x98, 0x4c, 0x14, + 0x21, 0x6c, 0x82, 0x89, 0x80, 0x72, 0xec, 0x47, 0x24, 0x30, 0x0d, 0x5b, 0x6b, 0x4d, 0x78, 0x65, + 0x0c, 0x5d, 0x30, 0xcf, 0x05, 0x4b, 0x71, 0x48, 0xba, 0x49, 0xca, 0xf6, 0x69, 0x40, 0xd2, 0x2e, + 0x0d, 0xcc, 0xba, 0xad, 0xb5, 0xce, 0x7b, 0x73, 0x2a, 0xd5, 0x51, 0x99, 0x47, 0x81, 0xf3, 0x49, + 0x03, 0xb0, 0x3a, 0x1b, 0x4f, 0x58, 0xcc, 0x09, 0xbc, 0x07, 0xea, 0x72, 0x6f, 0xa6, 0x66, 0x8f, + 0xb5, 0xa6, 0xda, 0xb6, 0x7b, 0x7a, 0x71, 0x6e, 0xc6, 0x28, 0x08, 0xeb, 0xc6, 0xe1, 0xf7, 0xc5, + 0x9a, 0x97, 0x93, 0xe0, 0xc3, 0x21, 0x6b, 0x74, 0x69, 0xcd, 0x8d, 0x91, 0xd6, 0xe4, 0x95, 0xaa, + 0xde, 0x38, 0x9f, 0x75, 0x70, 0xae, 0xda, 0x06, 0x4e, 0x03, 0x9d, 0x06, 0xd2, 0x6c, 0xc3, 0xd3, + 0x69, 0x00, 0x6f, 0x01, 0x23, 0xc0, 0x02, 0xab, 0x1e, 0x97, 0x73, 0x99, 0x72, 0x75, 0x43, 0x2a, + 0x25, 0x08, 0x3e, 0x01, 0x33, 0xf9, 0xda, 0x33, 0x6b, 0x12, 0xc6, 0x71, 0x24, 0x9d, 0x9d, 0x6a, + 0x2f, 0xe5, 0xbc, 0xe2, 0x26, 0x0a, 0xea, 0xba, 0x8c, 0x3b, 0x0a, 0xeb, 0x4d, 0xfb, 0x43, 0x71, + 0xb6, 0x20, 0x2e, 0xf0, 0x0e, 0x49, 0xb9, 0x69, 0xd8, 0x63, 0xd9, 0x82, 0x54, 0x08, 0xdb, 0xe0, + 0xa2, 0x60, 0x02, 0x47, 0x5d, 0x4e, 0xa2, 0xad, 0x6e, 0x40, 0x22, 0x12, 0xe6, 0x56, 0xd4, 0xa5, + 0xf0, 0x79, 0x99, 0xdc, 0x24, 0xd1, 0xd6, 0x46, 0x99, 0x82, 0x37, 0xc1, 0x6c, 0xce, 0xa9, 0xc0, + 0x1b, 0x12, 0x3e, 0x23, 0xdf, 0x57, 0xa0, 0x77, 0x40, 0x83, 0x0b, 0x2c, 0xfa, 0xdc, 0x1c, 0xb7, + 0xb5, 0xd6, 0x74, 0x7b, 0xe1, 0x8c, 0xb1, 0x37, 0x25, 0xc8, 0x53, 0x60, 0xc7, 0x01, 0xb3, 0xe5, + 0xa6, 0x8b, 0x23, 0xfe, 0xc3, 0x4f, 0xe7, 0x59, 0xe5, 0xd2, 0x4b, 0xd3, 0xef, 0x02, 0x23, 0xab, + 0xad, 0x6e, 0xfc, 0x7f, 0x6f, 0x41, 0x72, 0xda, 0x07, 0x3a, 0x98, 0x2c, 0x2b, 0xc2, 0x01, 0xa8, + 0xcb, 0x3b, 0x83, 0xcb, 0x7f, 0x2b, 0x72, 0xea, 0x1b, 0x6b, 0xae, 0x8c, 0x82, 0xe5, 0x1d, 0x9d, + 0xeb, 0x07, 0x5f, 0x7e, 0x7e, 0xd4, 0xaf, 0xc2, 0x2b, 0xe8, 0xac, 0x1f, 0x10, 0x7c, 0x0b, 0x0c, + 0x29, 0x61, 0xe9, 0x9f, 0x25, 0x8b, 0xc6, 0xcb, 0x23, 0x50, 0xaa, 0xef, 0xb2, 0xec, 0xbb, 0x08, + 0x17, 0xce, 0xea, 0x8b, 0xde, 0xd1, 0xe0, 0xfd, 0xfa, 0xc6, 0xe1, 0xb1, 0xa5, 0x1d, 0x1d, 0x5b, + 0xda, 0x8f, 0x63, 0x4b, 0xfb, 0x70, 0x62, 0xd5, 0x8e, 0x4e, 0xac, 0xda, 0xd7, 0x13, 0xab, 0xf6, + 0x6a, 0x35, 0xa4, 0x62, 0xbb, 0xef, 0xbb, 0x3d, 0xb6, 0x8b, 0x1e, 0xbf, 0x7c, 0x71, 0xff, 0x29, + 0x11, 0xaf, 0x59, 
0xba, 0x83, 0x7a, 0xdb, 0x98, 0xc6, 0xe8, 0x8d, 0xaa, 0x28, 0x06, 0x09, 0xe1, + 0x7e, 0x43, 0xfe, 0x8d, 0x6e, 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x47, 0xf1, 0xcf, 0x38, 0x65, + 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryPoolClient is the client API for QueryPool service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryPoolClient interface { + // Pools queries for all pools. + Pools(ctx context.Context, in *QueryPoolsRequest, opts ...grpc.CallOption) (*QueryPoolsResponse, error) + // Pool queries a pool by its Id. + Pool(ctx context.Context, in *QueryPoolRequest, opts ...grpc.CallOption) (*QueryPoolResponse, error) +} + +type queryPoolClient struct { + cc grpc1.ClientConn +} + +func NewQueryPoolClient(cc grpc1.ClientConn) QueryPoolClient { + return &queryPoolClient{cc} +} + +func (c *queryPoolClient) Pools(ctx context.Context, in *QueryPoolsRequest, opts ...grpc.CallOption) (*QueryPoolsResponse, error) { + out := new(QueryPoolsResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryPool/Pools", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryPoolClient) Pool(ctx context.Context, in *QueryPoolRequest, opts ...grpc.CallOption) (*QueryPoolResponse, error) { + out := new(QueryPoolResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryPool/Pool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryPoolServer is the server API for QueryPool service. +type QueryPoolServer interface { + // Pools queries for all pools. + Pools(context.Context, *QueryPoolsRequest) (*QueryPoolsResponse, error) + // Pool queries a pool by its Id. + Pool(context.Context, *QueryPoolRequest) (*QueryPoolResponse, error) +} + +// UnimplementedQueryPoolServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryPoolServer struct { +} + +func (*UnimplementedQueryPoolServer) Pools(ctx context.Context, req *QueryPoolsRequest) (*QueryPoolsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pools not implemented") +} +func (*UnimplementedQueryPoolServer) Pool(ctx context.Context, req *QueryPoolRequest) (*QueryPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pool not implemented") +} + +func RegisterQueryPoolServer(s grpc1.Server, srv QueryPoolServer) { + s.RegisterService(&_QueryPool_serviceDesc, srv) +} + +func _QueryPool_Pools_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPoolsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryPoolServer).Pools(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryPool/Pools", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryPoolServer).Pools(ctx, req.(*QueryPoolsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryPool_Pool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryPoolServer).Pool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryPool/Pool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryPoolServer).Pool(ctx, req.(*QueryPoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryPool_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryPool", + HandlerType: (*QueryPoolServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Pools", + Handler: _QueryPool_Pools_Handler, + }, + { + MethodName: "Pool", + Handler: _QueryPool_Pool_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/pools.proto", +} + +func (m *QueryPoolsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPoolsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPoolsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StorageProviderId != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.StorageProviderId)) + i-- + dAtA[i] = 0x28 + } + if m.Disabled { + i-- + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintPools(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x1a + } + if len(m.Search) > 0 { + i -= len(m.Search) + copy(dAtA[i:], m.Search) + i = encodeVarintPools(dAtA, i, uint64(len(m.Search))) + i-- + dAtA[i] = 0x12 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryPoolsResponse) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPoolsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPoolsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Pools) > 0 { + for iNdEx := len(m.Pools) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pools[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x38 + } + if m.TotalDelegation != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.TotalDelegation)) + i-- + dAtA[i] = 0x30 + } + if m.TotalSelfDelegation != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.TotalSelfDelegation)) + i-- + dAtA[i] = 0x28 + } + if len(m.Stakers) > 0 { + for iNdEx := len(m.Stakers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stakers[iNdEx]) + copy(dAtA[i:], m.Stakers[iNdEx]) + i = encodeVarintPools(dAtA, i, uint64(len(m.Stakers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.BundleProposal != nil { + { + size, err := m.BundleProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Data != nil { + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryPoolRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPoolRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPoolRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintPools(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintPools(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintPools(dAtA []byte, offset int, v uint64) int { + offset -= sovPools(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryPoolsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovPools(uint64(l)) + } + l = len(m.Search) + if l > 0 { + n += 1 + l + sovPools(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovPools(uint64(l)) + } + if m.Disabled { + n += 2 + } + if m.StorageProviderId != 0 { + n += 1 + sovPools(uint64(m.StorageProviderId)) + } + return n +} + +func (m *QueryPoolsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Pools) > 0 { + for _, e := range m.Pools { + l = e.Size() + n += 1 + l + sovPools(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovPools(uint64(l)) + } + return n +} + +func (m *PoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPools(uint64(m.Id)) + } + if m.Data != nil { + l = m.Data.Size() + n += 1 + l + sovPools(uint64(l)) + } + if m.BundleProposal != nil { + l = m.BundleProposal.Size() + n += 1 + l + sovPools(uint64(l)) + } + if len(m.Stakers) > 0 { + for _, s := range m.Stakers { + l = len(s) + n += 1 + l + sovPools(uint64(l)) + } + } + if m.TotalSelfDelegation != 0 { + n += 1 + sovPools(uint64(m.TotalSelfDelegation)) + } + if m.TotalDelegation != 0 { + n += 1 + sovPools(uint64(m.TotalDelegation)) + } + if m.Status != 0 { + n += 1 + sovPools(uint64(m.Status)) + } + return n +} + +func (m *QueryPoolRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovPools(uint64(m.Id)) + } + return n +} + +func (m *QueryPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Pool.Size() + n += 1 + l + sovPools(uint64(l)) + return n +} + +func sovPools(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozPools(x uint64) (n int) { + return sovPools(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryPoolsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPoolsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPoolsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageProviderId", wireType) + } + m.StorageProviderId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StorageProviderId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPools(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPools + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPoolsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPoolsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryPoolsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pools", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pools = append(m.Pools, PoolResponse{}) + if err := m.Pools[len(m.Pools)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPools(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPools + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = &types.Pool{} + } + if 
err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BundleProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BundleProposal == nil { + m.BundleProposal = &types1.BundleProposal{} + } + if err := m.BundleProposal.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stakers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stakers = append(m.Stakers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalSelfDelegation", wireType) + } + m.TotalSelfDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalSelfDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegation", wireType) + } + m.TotalDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= types.PoolStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPools(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPools + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPoolRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: QueryPoolRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPoolRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipPools(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPools + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPools + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPools + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthPools + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPools(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthPools + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPools(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPools + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPools + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPools + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + 
return 0, ErrInvalidLengthPools + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupPools + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthPools + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthPools = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPools = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupPools = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/pools.pb.gw.go b/x/query/types/pools.pb.gw.go new file mode 100644 index 00000000..58981c7d --- /dev/null +++ b/x/query/types/pools.pb.gw.go @@ -0,0 +1,272 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/query/v1beta1/pools.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_QueryPool_Pools_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_QueryPool_Pools_0(ctx context.Context, marshaler runtime.Marshaler, client QueryPoolClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPoolsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryPool_Pools_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Pools(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryPool_Pools_0(ctx context.Context, marshaler runtime.Marshaler, server QueryPoolServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPoolsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryPool_Pools_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Pools(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryPool_Pool_0(ctx context.Context, marshaler runtime.Marshaler, client QueryPoolClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, 
metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.Pool(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryPool_Pool_0(ctx context.Context, marshaler runtime.Marshaler, server QueryPoolServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.Pool(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryPoolHandlerServer registers the http handlers for service QueryPool to "mux". +// UnaryRPC :call QueryPoolServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryPoolHandlerFromEndpoint instead. +func RegisterQueryPoolHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryPoolServer) error { + + mux.Handle("GET", pattern_QueryPool_Pools_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryPool_Pools_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryPool_Pools_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryPool_Pool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryPool_Pool_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryPool_Pool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryPoolHandlerFromEndpoint is same as RegisterQueryPoolHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryPoolHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryPoolHandler(ctx, mux, conn) +} + +// RegisterQueryPoolHandler registers the http handlers for service QueryPool to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryPoolHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryPoolHandlerClient(ctx, mux, NewQueryPoolClient(conn)) +} + +// RegisterQueryPoolHandlerClient registers the http handlers for service QueryPool +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryPoolClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryPoolClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryPoolClient" to call the correct interceptors. +func RegisterQueryPoolHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryPoolClient) error { + + mux.Handle("GET", pattern_QueryPool_Pools_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryPool_Pools_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryPool_Pools_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_QueryPool_Pool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryPool_Pool_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryPool_Pool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_QueryPool_Pools_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "query", "v1beta1", "pools"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryPool_Pool_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "pool", "id"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryPool_Pools_0 = runtime.ForwardResponseMessage + + forward_QueryPool_Pool_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/query.pb.go b/x/query/types/query.pb.go new file mode 100644 index 00000000..775dae00 --- /dev/null +++ b/x/query/types/query.pb.go @@ -0,0 +1,2087 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/query.proto + +package types + +import ( + fmt "fmt" + types "github.com/KYVENetwork/chain/x/pool/types" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BasicPool contains the necessary properties need for a pool +// to be displayed in the UI +type BasicPool struct { + // id is the ID of the pool + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // name of the pool + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // runtime for the protocol nodes + // like evm, bitcoin, etc. + Runtime string `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` + // logo of the pool + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` + // operating_cost is the base payout for each bundle reward + OperatingCost uint64 `protobuf:"varint,5,opt,name=operating_cost,json=operatingCost,proto3" json:"operating_cost,omitempty"` + // upload_interval is the interval bundles get created + UploadInterval uint64 `protobuf:"varint,6,opt,name=upload_interval,json=uploadInterval,proto3" json:"upload_interval,omitempty"` + // total_funds of the pool. 
If the pool runs + // out of funds no more bundles will be produced + TotalFunds uint64 `protobuf:"varint,7,opt,name=total_funds,json=totalFunds,proto3" json:"total_funds,omitempty"` + // total_delegation of the pool + TotalDelegation uint64 `protobuf:"varint,8,opt,name=total_delegation,json=totalDelegation,proto3" json:"total_delegation,omitempty"` + // status of the pool if pool is able + // to produce bundles, etc. + Status types.PoolStatus `protobuf:"varint,9,opt,name=status,proto3,enum=kyve.pool.v1beta1.PoolStatus" json:"status,omitempty"` +} + +func (m *BasicPool) Reset() { *m = BasicPool{} } +func (m *BasicPool) String() string { return proto.CompactTextString(m) } +func (*BasicPool) ProtoMessage() {} +func (*BasicPool) Descriptor() ([]byte, []int) { + return fileDescriptor_6b41255feae93a15, []int{0} +} +func (m *BasicPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BasicPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BasicPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BasicPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicPool.Merge(m, src) +} +func (m *BasicPool) XXX_Size() int { + return m.Size() +} +func (m *BasicPool) XXX_DiscardUnknown() { + xxx_messageInfo_BasicPool.DiscardUnknown(m) +} + +var xxx_messageInfo_BasicPool proto.InternalMessageInfo + +func (m *BasicPool) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *BasicPool) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BasicPool) GetRuntime() string { + if m != nil { + return m.Runtime + } + return "" +} + +func (m *BasicPool) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +func (m *BasicPool) GetOperatingCost() uint64 { + if m != nil { + return m.OperatingCost + } + return 0 +} + +func (m *BasicPool) GetUploadInterval() uint64 { + if m != nil { + return m.UploadInterval + } + return 0 +} + +func (m *BasicPool) GetTotalFunds() uint64 { + if m != nil { + return m.TotalFunds + } + return 0 +} + +func (m *BasicPool) GetTotalDelegation() uint64 { + if m != nil { + return m.TotalDelegation + } + return 0 +} + +func (m *BasicPool) GetStatus() types.PoolStatus { + if m != nil { + return m.Status + } + return types.POOL_STATUS_UNSPECIFIED +} + +// FullStaker aggregates information from the staker and its delegators +// as well as pending queue entries into one object. +// It contains almost all needed information for a convenient usage +type FullStaker struct { + // address of the staker + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // metadata as logo, moniker, etc. + Metadata *StakerMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // amount the staker has delegated to himself + SelfDelegation uint64 `protobuf:"varint,3,opt,name=self_delegation,json=selfDelegation,proto3" json:"self_delegation,omitempty"` + // unbonding_amount is the amount the staker is currently unbonding + // from the self-delegation. + // This amount can be larger than `amount` when the staker + // got slashed during unbonding. However, at the end of + // the unbonding period this amount is double checked with the + // remaining amount. 
+ SelfDelegationUnbonding uint64 `protobuf:"varint,4,opt,name=self_delegation_unbonding,json=selfDelegationUnbonding,proto3" json:"self_delegation_unbonding,omitempty"` + // total_delegation returns the sum of all $KYVE users + // have delegated to this staker + TotalDelegation uint64 `protobuf:"varint,5,opt,name=total_delegation,json=totalDelegation,proto3" json:"total_delegation,omitempty"` + // delegator_count is the total number of individual + // delegator addresses for that user. + DelegatorCount uint64 `protobuf:"varint,6,opt,name=delegator_count,json=delegatorCount,proto3" json:"delegator_count,omitempty"` + // pools is a list of all pools the staker is currently + // participating, i.e. allowed to vote and upload data. + Pools []*PoolMembership `protobuf:"bytes,7,rep,name=pools,proto3" json:"pools,omitempty"` +} + +func (m *FullStaker) Reset() { *m = FullStaker{} } +func (m *FullStaker) String() string { return proto.CompactTextString(m) } +func (*FullStaker) ProtoMessage() {} +func (*FullStaker) Descriptor() ([]byte, []int) { + return fileDescriptor_6b41255feae93a15, []int{1} +} +func (m *FullStaker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FullStaker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FullStaker.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FullStaker) XXX_Merge(src proto.Message) { + xxx_messageInfo_FullStaker.Merge(m, src) +} +func (m *FullStaker) XXX_Size() int { + return m.Size() +} +func (m *FullStaker) XXX_DiscardUnknown() { + xxx_messageInfo_FullStaker.DiscardUnknown(m) +} + +var xxx_messageInfo_FullStaker proto.InternalMessageInfo + +func (m *FullStaker) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *FullStaker) GetMetadata() *StakerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *FullStaker) GetSelfDelegation() uint64 { + if m != nil { + return m.SelfDelegation + } + return 0 +} + +func (m *FullStaker) GetSelfDelegationUnbonding() uint64 { + if m != nil { + return m.SelfDelegationUnbonding + } + return 0 +} + +func (m *FullStaker) GetTotalDelegation() uint64 { + if m != nil { + return m.TotalDelegation + } + return 0 +} + +func (m *FullStaker) GetDelegatorCount() uint64 { + if m != nil { + return m.DelegatorCount + } + return 0 +} + +func (m *FullStaker) GetPools() []*PoolMembership { + if m != nil { + return m.Pools + } + return nil +} + +// StakerMetadata contains static information for a staker +type StakerMetadata struct { + // commission is the percentage of the rewards that will + // get transferred to the staker before the remaining + // rewards are split across all delegators + Commission string `protobuf:"bytes,1,opt,name=commission,proto3" json:"commission,omitempty"` + // moniker is a human-readable name for displaying + // the staker in the UI + Moniker string `protobuf:"bytes,2,opt,name=moniker,proto3" json:"moniker,omitempty"` + // website is a https-link to the website of the staker + Website string `protobuf:"bytes,3,opt,name=website,proto3" json:"website,omitempty"` + // logo is a link to an image file (like jpg or png) + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` + // pending_commission_change shows if the staker plans + // to change its commission. Delegators will see a warning in + // the UI. 
A Commission change takes some time until + // the commission is applied. Users have time to redelegate + // if they not agree with the new commission. + PendingCommissionChange *CommissionChangeEntry `protobuf:"bytes,5,opt,name=pending_commission_change,json=pendingCommissionChange,proto3" json:"pending_commission_change,omitempty"` +} + +func (m *StakerMetadata) Reset() { *m = StakerMetadata{} } +func (m *StakerMetadata) String() string { return proto.CompactTextString(m) } +func (*StakerMetadata) ProtoMessage() {} +func (*StakerMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_6b41255feae93a15, []int{2} +} +func (m *StakerMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StakerMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StakerMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StakerMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_StakerMetadata.Merge(m, src) +} +func (m *StakerMetadata) XXX_Size() int { + return m.Size() +} +func (m *StakerMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_StakerMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_StakerMetadata proto.InternalMessageInfo + +func (m *StakerMetadata) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +func (m *StakerMetadata) GetMoniker() string { + if m != nil { + return m.Moniker + } + return "" +} + +func (m *StakerMetadata) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +func (m *StakerMetadata) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +func (m *StakerMetadata) GetPendingCommissionChange() *CommissionChangeEntry { + if m != nil { + return m.PendingCommissionChange + } + return nil +} + +// CommissionChangeEntry shows when the old commission +// of a staker will change to the new commission +type CommissionChangeEntry struct { + // commission is the new commission that will + // become active once the change-time is over + Commission string `protobuf:"bytes,1,opt,name=commission,proto3" json:"commission,omitempty"` + // creation_date is the UNIX-timestamp (in seconds) + // of when the entry was created. 
+ CreationDate int64 `protobuf:"varint,2,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` +} + +func (m *CommissionChangeEntry) Reset() { *m = CommissionChangeEntry{} } +func (m *CommissionChangeEntry) String() string { return proto.CompactTextString(m) } +func (*CommissionChangeEntry) ProtoMessage() {} +func (*CommissionChangeEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_6b41255feae93a15, []int{3} +} +func (m *CommissionChangeEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommissionChangeEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommissionChangeEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommissionChangeEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommissionChangeEntry.Merge(m, src) +} +func (m *CommissionChangeEntry) XXX_Size() int { + return m.Size() +} +func (m *CommissionChangeEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CommissionChangeEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CommissionChangeEntry proto.InternalMessageInfo + +func (m *CommissionChangeEntry) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +func (m *CommissionChangeEntry) GetCreationDate() int64 { + if m != nil { + return m.CreationDate + } + return 0 +} + +// PoolMembership shows in which pool the staker +// is participating +type PoolMembership struct { + // pool contains useful information about the pool + Pool *BasicPool `protobuf:"bytes,1,opt,name=pool,proto3" json:"pool,omitempty"` + // points indicates if the staker is inactive + // If the staker misses a vote, a point is added. + // After 5 points the staker is removed from + // the stakers set. + Points uint64 `protobuf:"varint,2,opt,name=points,proto3" json:"points,omitempty"` + // is_leaving indicates if a user has scheduled a + // a PoolLeave entry. After the leave-time is over + // the staker will no longer participate in that pool + IsLeaving bool `protobuf:"varint,3,opt,name=is_leaving,json=isLeaving,proto3" json:"is_leaving,omitempty"` + // Valaddress is the address which is authorized to vote + // and submit bundles. If the server gets compromised + // the staker can just change the valaddress. 
+ Valaddress string `protobuf:"bytes,4,opt,name=valaddress,proto3" json:"valaddress,omitempty"` + // balance is the valaddress account balance and indicates + // whether or not the valaccount needs additional funds to + // pay for gas fees + Balance uint64 `protobuf:"varint,5,opt,name=balance,proto3" json:"balance,omitempty"` +} + +func (m *PoolMembership) Reset() { *m = PoolMembership{} } +func (m *PoolMembership) String() string { return proto.CompactTextString(m) } +func (*PoolMembership) ProtoMessage() {} +func (*PoolMembership) Descriptor() ([]byte, []int) { + return fileDescriptor_6b41255feae93a15, []int{4} +} +func (m *PoolMembership) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PoolMembership) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PoolMembership.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PoolMembership) XXX_Merge(src proto.Message) { + xxx_messageInfo_PoolMembership.Merge(m, src) +} +func (m *PoolMembership) XXX_Size() int { + return m.Size() +} +func (m *PoolMembership) XXX_DiscardUnknown() { + xxx_messageInfo_PoolMembership.DiscardUnknown(m) +} + +var xxx_messageInfo_PoolMembership proto.InternalMessageInfo + +func (m *PoolMembership) GetPool() *BasicPool { + if m != nil { + return m.Pool + } + return nil +} + +func (m *PoolMembership) GetPoints() uint64 { + if m != nil { + return m.Points + } + return 0 +} + +func (m *PoolMembership) GetIsLeaving() bool { + if m != nil { + return m.IsLeaving + } + return false +} + +func (m *PoolMembership) GetValaddress() string { + if m != nil { + return m.Valaddress + } + return "" +} + +func (m *PoolMembership) GetBalance() uint64 { + if m != nil { + return m.Balance + } + return 0 +} + +func init() { + proto.RegisterType((*BasicPool)(nil), "kyve.query.v1beta1.BasicPool") + proto.RegisterType((*FullStaker)(nil), "kyve.query.v1beta1.FullStaker") + proto.RegisterType((*StakerMetadata)(nil), "kyve.query.v1beta1.StakerMetadata") + proto.RegisterType((*CommissionChangeEntry)(nil), "kyve.query.v1beta1.CommissionChangeEntry") + proto.RegisterType((*PoolMembership)(nil), "kyve.query.v1beta1.PoolMembership") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/query.proto", fileDescriptor_6b41255feae93a15) } + +var fileDescriptor_6b41255feae93a15 = []byte{ + // 671 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x94, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xc7, 0xeb, 0x24, 0x4d, 0x9b, 0xc9, 0xd7, 0xf4, 0xd3, 0x4a, 0x50, 0x17, 0x51, 0x53, 0x05, + 0xa1, 0xb6, 0x1c, 0x12, 0xb5, 0x08, 0x09, 0x71, 0xe0, 0xd0, 0xb4, 0x95, 0x10, 0x14, 0x21, 0x23, + 0x90, 0x40, 0x48, 0xd6, 0xda, 0xde, 0x26, 0xab, 0xd8, 0xbb, 0x66, 0x77, 0x9d, 0x92, 0xb7, 0xe0, + 0x51, 0x38, 0xf0, 0x10, 0x1c, 0x7b, 0x84, 0x13, 0xa8, 0x7d, 0x11, 0xb4, 0xbb, 0xb6, 0x49, 0x4a, + 0x10, 0x37, 0xcf, 0x7f, 0xfe, 0x33, 0xc9, 0xfc, 0x66, 0x6c, 0xf0, 0xc6, 0xd3, 0x09, 0xe9, 0x7f, + 0xc8, 0x89, 0x98, 0xf6, 0x27, 0xfb, 0x21, 0x51, 0x78, 0xdf, 0x46, 0xbd, 0x4c, 0x70, 0xc5, 0x11, + 0xd2, 0xf9, 0x9e, 0x55, 0x8a, 0xfc, 0xad, 0xdb, 0xa6, 0x26, 0xe3, 0x3c, 0xa9, 0x4a, 0x74, 0x60, + 0x2b, 0xba, 0x9f, 0x6b, 0xd0, 0x3a, 0xc4, 0x92, 0x46, 0x2f, 0x39, 0x4f, 0x50, 0x07, 0x6a, 0x34, + 0x76, 0x9d, 0x6d, 0x67, 0xb7, 0xe1, 0xd7, 0x68, 0x8c, 0x10, 0x34, 0x18, 0x4e, 0x89, 0x5b, 0xdb, + 0x76, 0x76, 0x5b, 0xbe, 0x79, 0x46, 0x2e, 0xac, 0x88, 0x9c, 
0x29, 0x9a, 0x12, 0xb7, 0x6e, 0xe4, + 0x32, 0xd4, 0xee, 0x84, 0x0f, 0xb9, 0xdb, 0xb0, 0x6e, 0xfd, 0x8c, 0xee, 0x41, 0x87, 0x67, 0x44, + 0x60, 0x45, 0xd9, 0x30, 0x88, 0xb8, 0x54, 0xee, 0xb2, 0xe9, 0xbe, 0x56, 0xa9, 0x03, 0x2e, 0x15, + 0xda, 0x81, 0xf5, 0x3c, 0x4b, 0x38, 0x8e, 0x03, 0xca, 0x14, 0x11, 0x13, 0x9c, 0xb8, 0x4d, 0xe3, + 0xeb, 0x58, 0xf9, 0x69, 0xa1, 0xa2, 0x3b, 0xd0, 0x56, 0x5c, 0xe1, 0x24, 0x38, 0xcb, 0x59, 0x2c, + 0xdd, 0x15, 0x63, 0x02, 0x23, 0x9d, 0x68, 0x05, 0xed, 0xc1, 0xff, 0xd6, 0x10, 0x93, 0x84, 0x0c, + 0xb1, 0xa2, 0x9c, 0xb9, 0xab, 0xc6, 0xb5, 0x6e, 0xf4, 0xa3, 0x4a, 0x46, 0x0f, 0xa1, 0x29, 0x15, + 0x56, 0xb9, 0x74, 0x5b, 0xdb, 0xce, 0x6e, 0xe7, 0x60, 0xab, 0x67, 0xf0, 0x19, 0x3a, 0x05, 0xaa, + 0x9e, 0xc6, 0xf2, 0xca, 0x98, 0xfc, 0xc2, 0xdc, 0xfd, 0x5e, 0x03, 0x38, 0xc9, 0x13, 0x2d, 0x8f, + 0x89, 0xd0, 0x3c, 0x70, 0x1c, 0x0b, 0x22, 0xa5, 0x01, 0xd7, 0xf2, 0xcb, 0x10, 0x3d, 0x81, 0xd5, + 0x94, 0x28, 0x1c, 0x63, 0x85, 0x0d, 0xc1, 0xf6, 0x41, 0xb7, 0xf7, 0xe7, 0x82, 0x7a, 0xb6, 0xcf, + 0x69, 0xe1, 0xf4, 0xab, 0x1a, 0x0d, 0x45, 0x92, 0xe4, 0x6c, 0x76, 0x92, 0xba, 0x85, 0xa2, 0xe5, + 0x99, 0x41, 0x1e, 0xc3, 0xe6, 0x35, 0x63, 0x90, 0xb3, 0x90, 0xb3, 0x98, 0xb2, 0xa1, 0xd9, 0x46, + 0xc3, 0xdf, 0x98, 0x2f, 0x79, 0x5d, 0xa6, 0x17, 0xf2, 0x5a, 0x5e, 0xcc, 0x6b, 0x07, 0xd6, 0x0b, + 0x13, 0x17, 0x41, 0xc4, 0x73, 0xa6, 0xca, 0x25, 0x55, 0xf2, 0x40, 0xab, 0xe8, 0x11, 0x2c, 0x6b, + 0x88, 0x7a, 0x3d, 0xf5, 0xbf, 0x4d, 0xad, 0xc1, 0x9e, 0x92, 0x34, 0x24, 0x42, 0x8e, 0x68, 0xe6, + 0xdb, 0x82, 0xee, 0x0f, 0x07, 0x3a, 0xf3, 0x3c, 0x90, 0x07, 0x10, 0xf1, 0x34, 0xa5, 0x52, 0xea, + 0xbf, 0x66, 0x11, 0xcf, 0x28, 0x9a, 0x7f, 0xca, 0x19, 0x1d, 0x13, 0x51, 0x9c, 0x69, 0x19, 0xea, + 0xcc, 0x39, 0x09, 0x25, 0x55, 0xd5, 0xa5, 0x16, 0xe1, 0xc2, 0x4b, 0x25, 0xb0, 0x99, 0x11, 0xc3, + 0x24, 0xf8, 0xdd, 0x3d, 0x88, 0x46, 0x98, 0x0d, 0x89, 0x21, 0xd2, 0x3e, 0xd8, 0x5b, 0x34, 0xc8, + 0xa0, 0x32, 0x0f, 0x8c, 0xf7, 0x98, 0x29, 0x31, 0xf5, 0x37, 0x8a, 0x5e, 0xd7, 0xb3, 0xdd, 0xf7, + 0x70, 0x63, 0x61, 0xc5, 0x3f, 0xe7, 0xbc, 0x0b, 0x6b, 0x91, 0x20, 0x76, 0xbb, 0x31, 0x56, 0xf6, + 0xa5, 0xac, 0xfb, 0xff, 0x95, 0xe2, 0x11, 0x56, 0xa4, 0xfb, 0xc5, 0x81, 0xce, 0x3c, 0x59, 0xb4, + 0x0f, 0x0d, 0xcd, 0xd6, 0x74, 0x6c, 0x97, 0x37, 0x3e, 0x3f, 0x42, 0xf5, 0x01, 0xf0, 0x8d, 0x15, + 0xdd, 0x84, 0x66, 0xc6, 0x29, 0x53, 0xd2, 0xfc, 0x46, 0xc3, 0x2f, 0x22, 0xb4, 0x05, 0x40, 0x65, + 0x90, 0x10, 0x3c, 0xd1, 0x87, 0xa5, 0x99, 0xae, 0xfa, 0x2d, 0x2a, 0x9f, 0x5b, 0x41, 0x4f, 0x30, + 0xc1, 0x49, 0xf9, 0x32, 0x58, 0xb6, 0x33, 0x8a, 0xde, 0x47, 0x88, 0x13, 0xcc, 0x22, 0x52, 0x5c, + 0x58, 0x19, 0x1e, 0x1e, 0x7d, 0xbd, 0xf4, 0x9c, 0x8b, 0x4b, 0xcf, 0xf9, 0x79, 0xe9, 0x39, 0x9f, + 0xae, 0xbc, 0xa5, 0x8b, 0x2b, 0x6f, 0xe9, 0xdb, 0x95, 0xb7, 0xf4, 0xee, 0xfe, 0x90, 0xaa, 0x51, + 0x1e, 0xf6, 0x22, 0x9e, 0xf6, 0x9f, 0xbd, 0x7d, 0x73, 0xfc, 0x82, 0xa8, 0x73, 0x2e, 0xc6, 0xfd, + 0x68, 0x84, 0x29, 0xeb, 0x7f, 0x2c, 0xbe, 0x85, 0x6a, 0x9a, 0x11, 0x19, 0x36, 0xcd, 0x27, 0xed, + 0xc1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0xa5, 0xc1, 0x00, 0x26, 0x05, 0x00, 0x00, +} + +func (m *BasicPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BasicPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BasicPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 
0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x48 + } + if m.TotalDelegation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalDelegation)) + i-- + dAtA[i] = 0x40 + } + if m.TotalFunds != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalFunds)) + i-- + dAtA[i] = 0x38 + } + if m.UploadInterval != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.UploadInterval)) + i-- + dAtA[i] = 0x30 + } + if m.OperatingCost != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.OperatingCost)) + i-- + dAtA[i] = 0x28 + } + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Runtime) > 0 { + i -= len(m.Runtime) + copy(dAtA[i:], m.Runtime) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Runtime))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FullStaker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FullStaker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FullStaker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pools) > 0 { + for iNdEx := len(m.Pools) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Pools[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.DelegatorCount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.DelegatorCount)) + i-- + dAtA[i] = 0x30 + } + if m.TotalDelegation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalDelegation)) + i-- + dAtA[i] = 0x28 + } + if m.SelfDelegationUnbonding != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SelfDelegationUnbonding)) + i-- + dAtA[i] = 0x20 + } + if m.SelfDelegation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.SelfDelegation)) + i-- + dAtA[i] = 0x18 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StakerMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StakerMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StakerMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PendingCommissionChange != nil { + { + size, err := m.PendingCommissionChange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = 
encodeVarintQuery(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x1a + } + if len(m.Moniker) > 0 { + i -= len(m.Moniker) + copy(dAtA[i:], m.Moniker) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Moniker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CommissionChangeEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommissionChangeEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommissionChangeEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreationDate != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.CreationDate)) + i-- + dAtA[i] = 0x10 + } + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PoolMembership) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PoolMembership) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PoolMembership) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Balance != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Balance)) + i-- + dAtA[i] = 0x28 + } + if len(m.Valaddress) > 0 { + i -= len(m.Valaddress) + copy(dAtA[i:], m.Valaddress) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Valaddress))) + i-- + dAtA[i] = 0x22 + } + if m.IsLeaving { + i-- + if m.IsLeaving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Points != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Points)) + i-- + dAtA[i] = 0x10 + } + if m.Pool != nil { + { + size, err := m.Pool.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BasicPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Runtime) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.OperatingCost != 0 { + n += 1 + sovQuery(uint64(m.OperatingCost)) + } + if m.UploadInterval != 0 { + n += 1 + sovQuery(uint64(m.UploadInterval)) + } + if m.TotalFunds != 0 { + n += 1 + sovQuery(uint64(m.TotalFunds)) + } + if m.TotalDelegation != 0 { + n += 1 + sovQuery(uint64(m.TotalDelegation)) + } + if m.Status != 0 { + n += 1 + 
sovQuery(uint64(m.Status)) + } + return n +} + +func (m *FullStaker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.SelfDelegation != 0 { + n += 1 + sovQuery(uint64(m.SelfDelegation)) + } + if m.SelfDelegationUnbonding != 0 { + n += 1 + sovQuery(uint64(m.SelfDelegationUnbonding)) + } + if m.TotalDelegation != 0 { + n += 1 + sovQuery(uint64(m.TotalDelegation)) + } + if m.DelegatorCount != 0 { + n += 1 + sovQuery(uint64(m.DelegatorCount)) + } + if len(m.Pools) > 0 { + for _, e := range m.Pools { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *StakerMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Moniker) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.PendingCommissionChange != nil { + l = m.PendingCommissionChange.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *CommissionChangeEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.CreationDate != 0 { + n += 1 + sovQuery(uint64(m.CreationDate)) + } + return n +} + +func (m *PoolMembership) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pool != nil { + l = m.Pool.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Points != 0 { + n += 1 + sovQuery(uint64(m.Points)) + } + if m.IsLeaving { + n += 2 + } + l = len(m.Valaddress) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Balance != 0 { + n += 1 + sovQuery(uint64(m.Balance)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BasicPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BasicPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BasicPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtime = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingCost", wireType) + } + m.OperatingCost = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OperatingCost |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UploadInterval", wireType) + } + m.UploadInterval = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UploadInterval |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalFunds", wireType) + } + m.TotalFunds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalFunds |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegation", wireType) + } + m.TotalDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= types.PoolStatus(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FullStaker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FullStaker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FullStaker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &StakerMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfDelegation", wireType) + } + m.SelfDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SelfDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfDelegationUnbonding", wireType) + } + m.SelfDelegationUnbonding = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SelfDelegationUnbonding |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalDelegation", wireType) + } + m.TotalDelegation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.TotalDelegation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelegatorCount", wireType) + } + m.DelegatorCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelegatorCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pools", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pools = append(m.Pools, &PoolMembership{}) + if err := m.Pools[len(m.Pools)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StakerMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StakerMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StakerMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Moniker = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingCommissionChange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PendingCommissionChange == nil { + m.PendingCommissionChange = &CommissionChangeEntry{} + } + if err := m.PendingCommissionChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommissionChangeEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommissionChangeEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommissionChangeEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationDate", wireType) + } + m.CreationDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationDate |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PoolMembership) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PoolMembership: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PoolMembership: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pool", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pool == nil { + m.Pool = &BasicPool{} + } + if err := m.Pool.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Points", wireType) + } + m.Points = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Points |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLeaving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLeaving = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Valaddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Valaddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) + } + m.Balance = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Balance |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/stakers.pb.go b/x/query/types/stakers.pb.go new file mode 100644 index 00000000..19a91220 --- /dev/null +++ b/x/query/types/stakers.pb.go @@ -0,0 +1,2286 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/query/v1beta1/stakers.proto + +package types + +import ( + context "context" + fmt "fmt" + types "github.com/KYVENetwork/chain/x/stakers/types" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
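The helpers above (encodeVarintQuery, sovQuery, skipQuery and the decode loops in every Unmarshal method) all implement standard protobuf base-128 varints: seven payload bits per byte, with the high bit set on every byte except the last. A self-contained sketch of the same round trip, using illustrative names rather than the generated helpers and omitting the overflow and truncation checks the generated code performs:

package main

import (
	"fmt"
	"math/bits"
)

// putUvarint appends v in base-128 order, lowest seven bits first; encodeVarintQuery
// emits the same byte sequence at a precomputed offset, because the generated buffers
// are filled back to front.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen mirrors sovQuery: the number of bytes the encoding of v occupies.
func uvarintLen(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// readUvarint mirrors the shift-accumulate loops in the Unmarshal methods above,
// without their ErrIntOverflowQuery and io.ErrUnexpectedEOF guards.
func readUvarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := putUvarint(nil, 300)
	fmt.Printf("% x (predicted length %d)\n", buf, uvarintLen(300)) // ac 02 (predicted length 2)
	v, n := readUvarint(buf)
	fmt.Println(v, n) // 300 2
}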
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// StakerStatus ... +type StakerStatus int32 + +const ( + // STAKER_STATUS_UNSPECIFIED ... + STAKER_STATUS_UNSPECIFIED StakerStatus = 0 + // STAKER_STATUS_ACTIVE ... + STAKER_STATUS_ACTIVE StakerStatus = 1 + // STAKER_STATUS_INACTIVE ... + STAKER_STATUS_INACTIVE StakerStatus = 2 +) + +var StakerStatus_name = map[int32]string{ + 0: "STAKER_STATUS_UNSPECIFIED", + 1: "STAKER_STATUS_ACTIVE", + 2: "STAKER_STATUS_INACTIVE", +} + +var StakerStatus_value = map[string]int32{ + "STAKER_STATUS_UNSPECIFIED": 0, + "STAKER_STATUS_ACTIVE": 1, + "STAKER_STATUS_INACTIVE": 2, +} + +func (x StakerStatus) String() string { + return proto.EnumName(StakerStatus_name, int32(x)) +} + +func (StakerStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{0} +} + +// QueryStakersRequest is the request type for the Query/Stakers RPC method. +type QueryStakersRequest struct { + // pagination defines an optional pagination for the request. + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` + // status looks whether a staker is participating in pools or not + Status StakerStatus `protobuf:"varint,2,opt,name=status,proto3,enum=kyve.query.v1beta1.StakerStatus" json:"status,omitempty"` + // search searches for moniker OR address + Search string `protobuf:"bytes,3,opt,name=search,proto3" json:"search,omitempty"` +} + +func (m *QueryStakersRequest) Reset() { *m = QueryStakersRequest{} } +func (m *QueryStakersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryStakersRequest) ProtoMessage() {} +func (*QueryStakersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{0} +} +func (m *QueryStakersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersRequest.Merge(m, src) +} +func (m *QueryStakersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersRequest proto.InternalMessageInfo + +func (m *QueryStakersRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func (m *QueryStakersRequest) GetStatus() StakerStatus { + if m != nil { + return m.Status + } + return STAKER_STATUS_UNSPECIFIED +} + +func (m *QueryStakersRequest) GetSearch() string { + if m != nil { + return m.Search + } + return "" +} + +// QueryStakersResponse is the response type for the Query/Stakers RPC method. +type QueryStakersResponse struct { + // stakers ... + Stakers []FullStaker `protobuf:"bytes,1,rep,name=stakers,proto3" json:"stakers"` + // pagination defines the pagination in the response. 
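QueryStakersRequest combines an optional Cosmos SDK PageRequest with the StakerStatus filter and a free-text search over moniker or address. A short consumer sketch; the limit and search values are arbitrary examples and the import alias is only illustrative:

package main

import (
	"fmt"

	query "github.com/cosmos/cosmos-sdk/types/query"

	querytypes "github.com/KYVENetwork/chain/x/query/types"
)

func main() {
	req := &querytypes.QueryStakersRequest{
		Pagination: &query.PageRequest{
			Limit:      50,   // page size
			CountTotal: true, // also report the total number of matching stakers
		},
		Status: querytypes.STAKER_STATUS_ACTIVE, // only stakers currently participating in pools
		Search: "moniker-or-address",            // matched against moniker OR address
	}
	fmt.Println(req.String()) // proto.CompactTextString rendering of the request
}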
+ Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryStakersResponse) Reset() { *m = QueryStakersResponse{} } +func (m *QueryStakersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryStakersResponse) ProtoMessage() {} +func (*QueryStakersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{1} +} +func (m *QueryStakersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersResponse.Merge(m, src) +} +func (m *QueryStakersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersResponse proto.InternalMessageInfo + +func (m *QueryStakersResponse) GetStakers() []FullStaker { + if m != nil { + return m.Stakers + } + return nil +} + +func (m *QueryStakersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryStakerRequest is the request type for the Query/Staker RPC method. +type QueryStakerRequest struct { + // address ... + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` +} + +func (m *QueryStakerRequest) Reset() { *m = QueryStakerRequest{} } +func (m *QueryStakerRequest) String() string { return proto.CompactTextString(m) } +func (*QueryStakerRequest) ProtoMessage() {} +func (*QueryStakerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{2} +} +func (m *QueryStakerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakerRequest.Merge(m, src) +} +func (m *QueryStakerRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryStakerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakerRequest proto.InternalMessageInfo + +func (m *QueryStakerRequest) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +// QueryStakerResponse is the response type for the Query/Staker RPC method. +type QueryStakerResponse struct { + // staker ... 
+ Staker FullStaker `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker"` +} + +func (m *QueryStakerResponse) Reset() { *m = QueryStakerResponse{} } +func (m *QueryStakerResponse) String() string { return proto.CompactTextString(m) } +func (*QueryStakerResponse) ProtoMessage() {} +func (*QueryStakerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{3} +} +func (m *QueryStakerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakerResponse.Merge(m, src) +} +func (m *QueryStakerResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStakerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakerResponse proto.InternalMessageInfo + +func (m *QueryStakerResponse) GetStaker() FullStaker { + if m != nil { + return m.Staker + } + return FullStaker{} +} + +// QueryStakersByPoolRequest is the request type for the Query/Staker RPC method. +type QueryStakersByPoolRequest struct { + // pool_id ... + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *QueryStakersByPoolRequest) Reset() { *m = QueryStakersByPoolRequest{} } +func (m *QueryStakersByPoolRequest) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByPoolRequest) ProtoMessage() {} +func (*QueryStakersByPoolRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{4} +} +func (m *QueryStakersByPoolRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByPoolRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByPoolRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByPoolRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByPoolRequest.Merge(m, src) +} +func (m *QueryStakersByPoolRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByPoolRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByPoolRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByPoolRequest proto.InternalMessageInfo + +func (m *QueryStakersByPoolRequest) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +// QueryStakersByPoolResponse is the response type for the Query/Staker RPC method. +type QueryStakersByPoolResponse struct { + // stakers ... 
+ Stakers []StakerPoolResponse `protobuf:"bytes,1,rep,name=stakers,proto3" json:"stakers"` +} + +func (m *QueryStakersByPoolResponse) Reset() { *m = QueryStakersByPoolResponse{} } +func (m *QueryStakersByPoolResponse) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByPoolResponse) ProtoMessage() {} +func (*QueryStakersByPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{5} +} +func (m *QueryStakersByPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByPoolResponse.Merge(m, src) +} +func (m *QueryStakersByPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByPoolResponse proto.InternalMessageInfo + +func (m *QueryStakersByPoolResponse) GetStakers() []StakerPoolResponse { + if m != nil { + return m.Stakers + } + return nil +} + +// StakerPoolResponse ... +type StakerPoolResponse struct { + // staker ... + Staker *FullStaker `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // valaccount ... + Valaccount *types.Valaccount `protobuf:"bytes,2,opt,name=valaccount,proto3" json:"valaccount,omitempty"` +} + +func (m *StakerPoolResponse) Reset() { *m = StakerPoolResponse{} } +func (m *StakerPoolResponse) String() string { return proto.CompactTextString(m) } +func (*StakerPoolResponse) ProtoMessage() {} +func (*StakerPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{6} +} +func (m *StakerPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StakerPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StakerPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StakerPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StakerPoolResponse.Merge(m, src) +} +func (m *StakerPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *StakerPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StakerPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StakerPoolResponse proto.InternalMessageInfo + +func (m *StakerPoolResponse) GetStaker() *FullStaker { + if m != nil { + return m.Staker + } + return nil +} + +func (m *StakerPoolResponse) GetValaccount() *types.Valaccount { + if m != nil { + return m.Valaccount + } + return nil +} + +// QueryStakersByPoolCountRequest ... +type QueryStakersByPoolCountRequest struct { + // pagination defines an optional pagination for the request. 
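StakerPoolResponse pairs a FullStaker with the Valaccount it uses in the queried pool; both fields are pointers and may be nil, so the generated getters are the safe way in. A consumer sketch with a hand-built response; the sample address is a placeholder:

package main

import (
	"fmt"

	querytypes "github.com/KYVENetwork/chain/x/query/types"
)

func printPoolStakers(res *querytypes.QueryStakersByPoolResponse) {
	for _, entry := range res.GetStakers() {
		staker := entry.GetStaker() // may be nil
		if staker == nil {
			continue
		}
		fmt.Printf("staker %s: total delegation %d, delegators %d\n",
			staker.Address, staker.TotalDelegation, staker.DelegatorCount)
		if va := entry.GetValaccount(); va != nil {
			fmt.Println("  valaccount:", va.String()) // x/stakers Valaccount, proto text rendering
		}
	}
}

func main() {
	// Minimal hand-built response; in practice this comes from the StakersByPool RPC.
	printPoolStakers(&querytypes.QueryStakersByPoolResponse{
		Stakers: []querytypes.StakerPoolResponse{
			{Staker: &querytypes.FullStaker{Address: "kyve1exampleaddress", TotalDelegation: 1_000_000}},
		},
	})
}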
+ Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryStakersByPoolCountRequest) Reset() { *m = QueryStakersByPoolCountRequest{} } +func (m *QueryStakersByPoolCountRequest) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByPoolCountRequest) ProtoMessage() {} +func (*QueryStakersByPoolCountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{7} +} +func (m *QueryStakersByPoolCountRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByPoolCountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByPoolCountRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByPoolCountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByPoolCountRequest.Merge(m, src) +} +func (m *QueryStakersByPoolCountRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByPoolCountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByPoolCountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByPoolCountRequest proto.InternalMessageInfo + +func (m *QueryStakersByPoolCountRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryStakersByPoolCountResponse ... +type QueryStakersByPoolCountResponse struct { + // stakers ... + Stakers []FullStaker `protobuf:"bytes,1,rep,name=stakers,proto3" json:"stakers"` + // pagination defines the pagination in the response. + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryStakersByPoolCountResponse) Reset() { *m = QueryStakersByPoolCountResponse{} } +func (m *QueryStakersByPoolCountResponse) String() string { return proto.CompactTextString(m) } +func (*QueryStakersByPoolCountResponse) ProtoMessage() {} +func (*QueryStakersByPoolCountResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6aa31a681566da33, []int{8} +} +func (m *QueryStakersByPoolCountResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryStakersByPoolCountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryStakersByPoolCountResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryStakersByPoolCountResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryStakersByPoolCountResponse.Merge(m, src) +} +func (m *QueryStakersByPoolCountResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryStakersByPoolCountResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryStakersByPoolCountResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryStakersByPoolCountResponse proto.InternalMessageInfo + +func (m *QueryStakersByPoolCountResponse) GetStakers() []FullStaker { + if m != nil { + return m.Stakers + } + return nil +} + +func (m *QueryStakersByPoolCountResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterEnum("kyve.query.v1beta1.StakerStatus", StakerStatus_name, StakerStatus_value) + proto.RegisterType((*QueryStakersRequest)(nil), 
"kyve.query.v1beta1.QueryStakersRequest") + proto.RegisterType((*QueryStakersResponse)(nil), "kyve.query.v1beta1.QueryStakersResponse") + proto.RegisterType((*QueryStakerRequest)(nil), "kyve.query.v1beta1.QueryStakerRequest") + proto.RegisterType((*QueryStakerResponse)(nil), "kyve.query.v1beta1.QueryStakerResponse") + proto.RegisterType((*QueryStakersByPoolRequest)(nil), "kyve.query.v1beta1.QueryStakersByPoolRequest") + proto.RegisterType((*QueryStakersByPoolResponse)(nil), "kyve.query.v1beta1.QueryStakersByPoolResponse") + proto.RegisterType((*StakerPoolResponse)(nil), "kyve.query.v1beta1.StakerPoolResponse") + proto.RegisterType((*QueryStakersByPoolCountRequest)(nil), "kyve.query.v1beta1.QueryStakersByPoolCountRequest") + proto.RegisterType((*QueryStakersByPoolCountResponse)(nil), "kyve.query.v1beta1.QueryStakersByPoolCountResponse") +} + +func init() { proto.RegisterFile("kyve/query/v1beta1/stakers.proto", fileDescriptor_6aa31a681566da33) } + +var fileDescriptor_6aa31a681566da33 = []byte{ + // 739 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4f, 0x4f, 0x13, 0x4d, + 0x1c, 0xee, 0x94, 0xa6, 0x0d, 0x03, 0xef, 0x1b, 0x32, 0x2f, 0xe1, 0x2d, 0xab, 0x2c, 0xcd, 0x6a, + 0x4a, 0x45, 0xd9, 0x09, 0x45, 0x8d, 0x07, 0x63, 0xe4, 0x4f, 0x6b, 0x1a, 0x12, 0x82, 0xdb, 0x42, + 0xa2, 0x97, 0x66, 0xda, 0x4e, 0xb6, 0x9b, 0x96, 0x9d, 0xb2, 0xb3, 0x45, 0x1b, 0xc2, 0x41, 0x2f, + 0x7a, 0x32, 0x26, 0x7e, 0x00, 0x0f, 0xc6, 0x8b, 0x17, 0xfd, 0x18, 0x1c, 0x49, 0xbc, 0x78, 0x32, + 0x06, 0xfc, 0x0e, 0x5e, 0x4d, 0x67, 0x67, 0x71, 0x17, 0x5a, 0x5b, 0x12, 0x0f, 0xde, 0x66, 0x76, + 0x9e, 0x67, 0x7e, 0xcf, 0xf3, 0xcc, 0xfc, 0x66, 0x61, 0xaa, 0xd1, 0xd9, 0xa3, 0x78, 0xb7, 0x4d, + 0x9d, 0x0e, 0xde, 0x5b, 0xac, 0x50, 0x97, 0x2c, 0x62, 0xee, 0x92, 0x06, 0x75, 0xb8, 0xde, 0x72, + 0x98, 0xcb, 0x10, 0xea, 0x22, 0x74, 0x81, 0xd0, 0x25, 0x42, 0x99, 0xaf, 0x32, 0xbe, 0xc3, 0x38, + 0xae, 0x10, 0x7e, 0x96, 0xdc, 0x22, 0xa6, 0x65, 0x13, 0xd7, 0x62, 0xb6, 0xc7, 0x57, 0x26, 0x4d, + 0x66, 0x32, 0x31, 0xc4, 0xdd, 0x91, 0xfc, 0x7a, 0xd9, 0x64, 0xcc, 0x6c, 0x52, 0x4c, 0x5a, 0x16, + 0x26, 0xb6, 0xcd, 0x5c, 0x41, 0x91, 0x35, 0x15, 0xb5, 0x87, 0x2a, 0x4f, 0x81, 0xb7, 0xae, 0x89, + 0x75, 0xa9, 0xb3, 0xb7, 0x6e, 0xed, 0x23, 0x80, 0xff, 0x3d, 0xec, 0x72, 0x8a, 0xde, 0x67, 0x83, + 0xee, 0xb6, 0x29, 0x77, 0x51, 0x1e, 0xc2, 0x5f, 0x1a, 0x93, 0x20, 0x05, 0x32, 0x63, 0xd9, 0xb4, + 0xee, 0x19, 0xd2, 0xbb, 0x86, 0xc2, 0x5e, 0xf5, 0x4d, 0x62, 0x52, 0xc9, 0x35, 0x02, 0x4c, 0x74, + 0x07, 0xc6, 0xb9, 0x4b, 0xdc, 0x36, 0x4f, 0x46, 0x53, 0x20, 0xf3, 0x6f, 0x36, 0xa5, 0x9f, 0x0f, + 0x4a, 0xf7, 0x6a, 0x17, 0x05, 0xce, 0x90, 0x78, 0x34, 0x05, 0xe3, 0x9c, 0x12, 0xa7, 0x5a, 0x4f, + 0x8e, 0xa4, 0x40, 0x66, 0xd4, 0x90, 0x33, 0xed, 0x2d, 0x80, 0x93, 0x61, 0xc5, 0xbc, 0xc5, 0x6c, + 0x4e, 0xd1, 0x3d, 0x98, 0x90, 0xde, 0x92, 0x20, 0x35, 0x92, 0x19, 0xcb, 0xaa, 0xbd, 0x6a, 0xe5, + 0xdb, 0xcd, 0xa6, 0xc7, 0x5c, 0x89, 0x1d, 0x7e, 0x9d, 0x8d, 0x18, 0x3e, 0x09, 0x3d, 0x08, 0x59, + 0x8e, 0x0a, 0xcb, 0x73, 0x03, 0x2d, 0x7b, 0xc5, 0x83, 0x9e, 0x35, 0x1d, 0xa2, 0x80, 0x40, 0x3f, + 0xd1, 0x24, 0x4c, 0x90, 0x5a, 0xcd, 0xa1, 0x9c, 0x8b, 0x38, 0x47, 0x0d, 0x7f, 0xaa, 0x15, 0x43, + 0x47, 0x70, 0xea, 0xe7, 0xae, 0x88, 0xae, 0x41, 0x1d, 0x19, 0xff, 0x70, 0x76, 0x24, 0x47, 0xbb, + 0x09, 0xa7, 0x83, 0x29, 0xad, 0x74, 0x36, 0x19, 0x6b, 0xfa, 0x5a, 0xfe, 0x87, 0x89, 0x16, 0x63, + 0xcd, 0xb2, 0x55, 0x13, 0x7b, 0xc7, 0x8c, 0x78, 0x77, 0x5a, 0xa8, 0x69, 0x35, 0xa8, 0xf4, 0x62, + 0x49, 0x45, 0xf9, 0xb3, 0x09, 0xa7, 0xfb, 0x9f, 
0x66, 0x90, 0x78, 0x26, 0x69, 0xed, 0x15, 0x80, + 0xe8, 0x3c, 0x0a, 0xdd, 0xbe, 0x98, 0x61, 0xdf, 0x2a, 0xba, 0x0f, 0xe1, 0x1e, 0x69, 0x92, 0x6a, + 0x95, 0xb5, 0x6d, 0x57, 0x1e, 0x9c, 0xbc, 0x67, 0xfe, 0x65, 0xf7, 0xd9, 0xdb, 0xa7, 0x38, 0x23, + 0xc0, 0xd1, 0xea, 0x50, 0x3d, 0x6f, 0x7b, 0x55, 0xc0, 0xfe, 0x6c, 0x3f, 0x68, 0x1f, 0x00, 0x9c, + 0xed, 0x5b, 0xea, 0x2f, 0xbb, 0xc8, 0xf3, 0x16, 0x1c, 0x0f, 0xb6, 0x26, 0x9a, 0x81, 0xd3, 0xc5, + 0xd2, 0xf2, 0x7a, 0xce, 0x28, 0x17, 0x4b, 0xcb, 0xa5, 0xad, 0x62, 0x79, 0x6b, 0xa3, 0xb8, 0x99, + 0x5b, 0x2d, 0xe4, 0x0b, 0xb9, 0xb5, 0x89, 0x08, 0x4a, 0xc2, 0xc9, 0xf0, 0xf2, 0xf2, 0x6a, 0xa9, + 0xb0, 0x9d, 0x9b, 0x00, 0x48, 0x81, 0x53, 0xe1, 0x95, 0xc2, 0x86, 0x5c, 0x8b, 0x2a, 0xb1, 0x97, + 0xef, 0xd4, 0x48, 0xf6, 0x47, 0x0c, 0x8e, 0x07, 0x73, 0x41, 0xcf, 0x00, 0x4c, 0xf8, 0xe3, 0xb9, + 0x5e, 0xfe, 0x7b, 0xbc, 0x5a, 0x4a, 0x66, 0x30, 0xd0, 0xb3, 0xa9, 0x5d, 0x79, 0xfe, 0xf9, 0xfb, + 0x9b, 0xe8, 0x0c, 0xba, 0x84, 0xfb, 0x3f, 0xed, 0xe8, 0x05, 0x80, 0x71, 0x8f, 0x88, 0xd2, 0x03, + 0x76, 0xf6, 0x15, 0xcc, 0x0d, 0xc4, 0x49, 0x01, 0x37, 0x84, 0x80, 0x34, 0xba, 0xda, 0x5f, 0x00, + 0xde, 0x97, 0x2f, 0xc4, 0x01, 0x7a, 0x0f, 0xe0, 0x3f, 0xa1, 0x1b, 0x83, 0x16, 0x06, 0x59, 0x0d, + 0x75, 0xbc, 0xa2, 0x0f, 0x0b, 0x97, 0xf2, 0x6e, 0x09, 0x79, 0x18, 0x2d, 0xfc, 0x26, 0x9f, 0x72, + 0xa5, 0x53, 0xee, 0xbe, 0x1b, 0x78, 0x5f, 0x3e, 0x26, 0x07, 0xe8, 0xd3, 0x69, 0x67, 0x07, 0x6f, + 0x36, 0xca, 0x0e, 0x57, 0x3d, 0xd8, 0x71, 0xca, 0xd2, 0x85, 0x38, 0x52, 0xf6, 0xa2, 0x90, 0x7d, + 0x1d, 0x5d, 0x1b, 0x42, 0x76, 0x59, 0xf4, 0xfe, 0xca, 0xda, 0xe1, 0xb1, 0x0a, 0x8e, 0x8e, 0x55, + 0xf0, 0xed, 0x58, 0x05, 0xaf, 0x4f, 0xd4, 0xc8, 0xd1, 0x89, 0x1a, 0xf9, 0x72, 0xa2, 0x46, 0x1e, + 0xcf, 0x9b, 0x96, 0x5b, 0x6f, 0x57, 0xf4, 0x2a, 0xdb, 0xc1, 0xeb, 0x8f, 0xb6, 0x73, 0x1b, 0xd4, + 0x7d, 0xc2, 0x9c, 0x06, 0xae, 0xd6, 0x89, 0x65, 0xe3, 0xa7, 0x72, 0x77, 0xb7, 0xd3, 0xa2, 0xbc, + 0x12, 0x17, 0xbf, 0xd3, 0xa5, 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x68, 0x64, 0xf5, 0xe3, 0x2a, + 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryStakersClient is the client API for QueryStakers service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryStakersClient interface { + // Stakers queries for all stakers. + Stakers(ctx context.Context, in *QueryStakersRequest, opts ...grpc.CallOption) (*QueryStakersResponse, error) + // Staker queries for all stakers. 
+ Staker(ctx context.Context, in *QueryStakerRequest, opts ...grpc.CallOption) (*QueryStakerResponse, error) + // StakersByPool queries for all stakers that are currently participating in the given pool + StakersByPool(ctx context.Context, in *QueryStakersByPoolRequest, opts ...grpc.CallOption) (*QueryStakersByPoolResponse, error) + // StakersByPool queries for all stakers and sorted them first by number of pools participating and + // then by delegation + StakersByPoolCount(ctx context.Context, in *QueryStakersByPoolCountRequest, opts ...grpc.CallOption) (*QueryStakersByPoolCountResponse, error) +} + +type queryStakersClient struct { + cc grpc1.ClientConn +} + +func NewQueryStakersClient(cc grpc1.ClientConn) QueryStakersClient { + return &queryStakersClient{cc} +} + +func (c *queryStakersClient) Stakers(ctx context.Context, in *QueryStakersRequest, opts ...grpc.CallOption) (*QueryStakersResponse, error) { + out := new(QueryStakersResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryStakers/Stakers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryStakersClient) Staker(ctx context.Context, in *QueryStakerRequest, opts ...grpc.CallOption) (*QueryStakerResponse, error) { + out := new(QueryStakerResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryStakers/Staker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryStakersClient) StakersByPool(ctx context.Context, in *QueryStakersByPoolRequest, opts ...grpc.CallOption) (*QueryStakersByPoolResponse, error) { + out := new(QueryStakersByPoolResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryStakers/StakersByPool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryStakersClient) StakersByPoolCount(ctx context.Context, in *QueryStakersByPoolCountRequest, opts ...grpc.CallOption) (*QueryStakersByPoolCountResponse, error) { + out := new(QueryStakersByPoolCountResponse) + err := c.cc.Invoke(ctx, "/kyve.query.v1beta1.QueryStakers/StakersByPoolCount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryStakersServer is the server API for QueryStakers service. +type QueryStakersServer interface { + // Stakers queries for all stakers. + Stakers(context.Context, *QueryStakersRequest) (*QueryStakersResponse, error) + // Staker queries for all stakers. + Staker(context.Context, *QueryStakerRequest) (*QueryStakerResponse, error) + // StakersByPool queries for all stakers that are currently participating in the given pool + StakersByPool(context.Context, *QueryStakersByPoolRequest) (*QueryStakersByPoolResponse, error) + // StakersByPool queries for all stakers and sorted them first by number of pools participating and + // then by delegation + StakersByPoolCount(context.Context, *QueryStakersByPoolCountRequest) (*QueryStakersByPoolCountResponse, error) +} + +// UnimplementedQueryStakersServer can be embedded to have forward compatible implementations. 
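The generated QueryStakersClient works against any gRPC endpoint serving this QueryStakers service, since *grpc.ClientConn satisfies the gogo grpc1.ClientConn interface it expects. A consumer sketch; localhost:9090 is a placeholder endpoint and error handling is reduced to log.Fatal:

package main

import (
	"context"
	"log"

	query "github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	querytypes "github.com/KYVENetwork/chain/x/query/types"
)

func main() {
	// Placeholder endpoint; replace with a real node's gRPC address.
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := querytypes.NewQueryStakersClient(conn)

	res, err := client.Stakers(context.Background(), &querytypes.QueryStakersRequest{
		Status:     querytypes.STAKER_STATUS_ACTIVE,
		Pagination: &query.PageRequest{Limit: 10},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, staker := range res.Stakers {
		log.Printf("%s: total delegation %d", staker.Address, staker.TotalDelegation)
	}
}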
+type UnimplementedQueryStakersServer struct { +} + +func (*UnimplementedQueryStakersServer) Stakers(ctx context.Context, req *QueryStakersRequest) (*QueryStakersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stakers not implemented") +} +func (*UnimplementedQueryStakersServer) Staker(ctx context.Context, req *QueryStakerRequest) (*QueryStakerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Staker not implemented") +} +func (*UnimplementedQueryStakersServer) StakersByPool(ctx context.Context, req *QueryStakersByPoolRequest) (*QueryStakersByPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StakersByPool not implemented") +} +func (*UnimplementedQueryStakersServer) StakersByPoolCount(ctx context.Context, req *QueryStakersByPoolCountRequest) (*QueryStakersByPoolCountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StakersByPoolCount not implemented") +} + +func RegisterQueryStakersServer(s grpc1.Server, srv QueryStakersServer) { + s.RegisterService(&_QueryStakers_serviceDesc, srv) +} + +func _QueryStakers_Stakers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryStakersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryStakersServer).Stakers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryStakers/Stakers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryStakersServer).Stakers(ctx, req.(*QueryStakersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryStakers_Staker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryStakerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryStakersServer).Staker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryStakers/Staker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryStakersServer).Staker(ctx, req.(*QueryStakerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryStakers_StakersByPool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryStakersByPoolRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryStakersServer).StakersByPool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.query.v1beta1.QueryStakers/StakersByPool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryStakersServer).StakersByPool(ctx, req.(*QueryStakersByPoolRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _QueryStakers_StakersByPoolCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryStakersByPoolCountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryStakersServer).StakersByPoolCount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/kyve.query.v1beta1.QueryStakers/StakersByPoolCount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryStakersServer).StakersByPoolCount(ctx, req.(*QueryStakersByPoolCountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _QueryStakers_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.query.v1beta1.QueryStakers", + HandlerType: (*QueryStakersServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Stakers", + Handler: _QueryStakers_Stakers_Handler, + }, + { + MethodName: "Staker", + Handler: _QueryStakers_Staker_Handler, + }, + { + MethodName: "StakersByPool", + Handler: _QueryStakers_StakersByPool_Handler, + }, + { + MethodName: "StakersByPoolCount", + Handler: _QueryStakers_StakersByPoolCount_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/query/v1beta1/stakers.proto", +} + +func (m *QueryStakersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Search) > 0 { + i -= len(m.Search) + copy(dAtA[i:], m.Search) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Search))) + i-- + dAtA[i] = 0x1a + } + if m.Status != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x10 + } + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Stakers) > 0 { + for iNdEx := len(m.Stakers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stakers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryStakerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Address))) + 
i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryStakerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Staker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryStakersByPoolRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByPoolRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByPoolRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersByPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stakers) > 0 { + for iNdEx := len(m.Stakers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stakers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StakerPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StakerPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StakerPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Valaccount != nil { + { + size, err := m.Valaccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Staker != nil { + { + size, err := m.Staker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersByPoolCountRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByPoolCountRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByPoolCountRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryStakersByPoolCountResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryStakersByPoolCountResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryStakersByPoolCountResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Stakers) > 0 { + for iNdEx := len(m.Stakers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stakers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStakers(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStakers(dAtA []byte, offset int, v uint64) int { + offset -= sovStakers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryStakersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovStakers(uint64(l)) + } + if m.Status != 0 { + n += 1 + sovStakers(uint64(m.Status)) + } + l = len(m.Search) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *QueryStakersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stakers) > 0 { + for _, e := range m.Stakers { + l = e.Size() + n += 1 + l + sovStakers(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *QueryStakerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *QueryStakerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Staker.Size() + n += 1 + l + sovStakers(uint64(l)) + return n +} + +func (m *QueryStakersByPoolRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovStakers(uint64(m.PoolId)) + } + return n +} + +func (m *QueryStakersByPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stakers) > 0 { + for _, e := range m.Stakers { + l = e.Size() + n += 1 + l + sovStakers(uint64(l)) + } + } + return n +} + +func (m *StakerPoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Staker != nil { + l = m.Staker.Size() + n += 1 + l + sovStakers(uint64(l)) + } + if m.Valaccount != nil { + l = m.Valaccount.Size() + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *QueryStakersByPoolCountRequest) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *QueryStakersByPoolCountResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stakers) > 0 { + for _, e := range m.Stakers { + l = e.Size() + n += 1 + l + sovStakers(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func sovStakers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStakers(x uint64) (n int) { + return sovStakers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryStakersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= StakerStatus(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*QueryStakersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stakers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stakers = append(m.Stakers, FullStaker{}) + if err := m.Stakers[len(m.Stakers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Staker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByPoolRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByPoolRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByPoolRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stakers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stakers = append(m.Stakers, StakerPoolResponse{}) + if err := m.Stakers[len(m.Stakers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StakerPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StakerPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StakerPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Staker == nil { + m.Staker = &FullStaker{} + } + if err := m.Staker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Valaccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Valaccount == nil { + m.Valaccount = &types.Valaccount{} + } + if err := m.Valaccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByPoolCountRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByPoolCountRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByPoolCountRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryStakersByPoolCountResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryStakersByPoolCountResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryStakersByPoolCountResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stakers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stakers = append(m.Stakers, FullStaker{}) + if err := m.Stakers[len(m.Stakers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStakers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStakers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStakers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStakers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStakers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStakers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStakers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/query/types/stakers.pb.gw.go 
b/x/query/types/stakers.pb.gw.go new file mode 100644 index 00000000..70ee8fdb --- /dev/null +++ b/x/query/types/stakers.pb.gw.go @@ -0,0 +1,456 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/query/v1beta1/stakers.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_QueryStakers_Stakers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_QueryStakers_Stakers_0(ctx context.Context, marshaler runtime.Marshaler, client QueryStakersClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryStakers_Stakers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Stakers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryStakers_Stakers_0(ctx context.Context, marshaler runtime.Marshaler, server QueryStakersServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryStakers_Stakers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Stakers(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryStakers_Staker_0(ctx context.Context, marshaler runtime.Marshaler, client QueryStakersClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := client.Staker(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryStakers_Staker_0(ctx context.Context, marshaler runtime.Marshaler, server QueryStakersServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq QueryStakerRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["address"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "address") + } + + protoReq.Address, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "address", err) + } + + msg, err := server.Staker(ctx, &protoReq) + return msg, metadata, err + +} + +func request_QueryStakers_StakersByPool_0(ctx context.Context, marshaler runtime.Marshaler, client QueryStakersClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + msg, err := client.StakersByPool(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryStakers_StakersByPool_0(ctx context.Context, marshaler runtime.Marshaler, server QueryStakersServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByPoolRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["pool_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pool_id") + } + + protoReq.PoolId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pool_id", err) + } + + msg, err := server.StakersByPool(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_QueryStakers_StakersByPoolCount_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_QueryStakers_StakersByPoolCount_0(ctx context.Context, marshaler runtime.Marshaler, client QueryStakersClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByPoolCountRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryStakers_StakersByPoolCount_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.StakersByPoolCount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_QueryStakers_StakersByPoolCount_0(ctx context.Context, marshaler runtime.Marshaler, server QueryStakersServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryStakersByPoolCountRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_QueryStakers_StakersByPoolCount_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.StakersByPoolCount(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryStakersHandlerServer registers the http handlers for service QueryStakers to "mux". +// UnaryRPC :call QueryStakersServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryStakersHandlerFromEndpoint instead. +func RegisterQueryStakersHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryStakersServer) error { + + mux.Handle("GET", pattern_QueryStakers_Stakers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryStakers_Stakers_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_Stakers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryStakers_Staker_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryStakers_Staker_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_Staker_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
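+	// Editor's note (illustration only, not protoc-gen-grpc-gateway output): this closure
+	// serves the REST route defined by pattern_QueryStakers_Staker_0 at the bottom of this
+	// file, i.e. GET /kyve/query/v1beta1/staker/{address}, by calling the Staker RPC on the
+	// in-process QueryStakersServer. A client speaking gRPC directly could issue the
+	// equivalent call roughly as follows (conn is an assumed, already-dialled *grpc.ClientConn):
+	//
+	//	res, err := NewQueryStakersClient(conn).Staker(ctx, &QueryStakerRequest{Address: addr})
+	//
+	// where addr is a placeholder bech32 account address and res is a *QueryStakerResponse.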
+ + }) + + mux.Handle("GET", pattern_QueryStakers_StakersByPool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryStakers_StakersByPool_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_StakersByPool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryStakers_StakersByPoolCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_QueryStakers_StakersByPoolCount_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_StakersByPoolCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryStakersHandlerFromEndpoint is same as RegisterQueryStakersHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryStakersHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryStakersHandler(ctx, mux, conn) +} + +// RegisterQueryStakersHandler registers the http handlers for service QueryStakers to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryStakersHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryStakersHandlerClient(ctx, mux, NewQueryStakersClient(conn)) +} + +// RegisterQueryStakersHandlerClient registers the http handlers for service QueryStakers +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryStakersClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryStakersClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryStakersClient" to call the correct interceptors. +func RegisterQueryStakersHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryStakersClient) error { + + mux.Handle("GET", pattern_QueryStakers_Stakers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryStakers_Stakers_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_Stakers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryStakers_Staker_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryStakers_Staker_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_Staker_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryStakers_StakersByPool_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryStakers_StakersByPool_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_StakersByPool_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_QueryStakers_StakersByPoolCount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_QueryStakers_StakersByPoolCount_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_QueryStakers_StakersByPoolCount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
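+	// Editor's note (illustration only, not protoc-gen-grpc-gateway output): the registration
+	// helpers above are what an API server uses to expose these routes over HTTP. A minimal
+	// wiring sketch, assuming the node's gRPC service listens on "localhost:9090" and the REST
+	// mux is served on ":1317" (both addresses are placeholders):
+	//
+	//	gwMux := runtime.NewServeMux()
+	//	opts := []grpc.DialOption{grpc.WithInsecure()}
+	//	if err := RegisterQueryStakersHandlerFromEndpoint(ctx, gwMux, "localhost:9090", opts); err != nil {
+	//		return err
+	//	}
+	//	return http.ListenAndServe(":1317", gwMux)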
+ + }) + + return nil +} + +var ( + pattern_QueryStakers_Stakers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "query", "v1beta1", "stakers"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryStakers_Staker_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "staker", "address"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryStakers_StakersByPool_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "query", "v1beta1", "stakers_by_pool", "pool_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_QueryStakers_StakersByPoolCount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "query", "v1beta1", "stakers_by_pool_count"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_QueryStakers_Stakers_0 = runtime.ForwardResponseMessage + + forward_QueryStakers_Staker_0 = runtime.ForwardResponseMessage + + forward_QueryStakers_StakersByPool_0 = runtime.ForwardResponseMessage + + forward_QueryStakers_StakersByPoolCount_0 = runtime.ForwardResponseMessage +) diff --git a/x/query/types/types.go b/x/query/types/types.go new file mode 100644 index 00000000..ab1254f4 --- /dev/null +++ b/x/query/types/types.go @@ -0,0 +1 @@ +package types diff --git a/x/stakers/client/cli/query.go b/x/stakers/client/cli/query.go new file mode 100644 index 00000000..5816b920 --- /dev/null +++ b/x/stakers/client/cli/query.go @@ -0,0 +1,31 @@ +package cli + +import ( + "fmt" + // "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + // "github.com/cosmos/cosmos-sdk/client/flags" + // sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/KYVENetwork/chain/x/stakers/types" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd(queryRoute string) *cobra.Command { + // Group stakers queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdQueryParams()) + // this line is used by starport scaffolding # 1 + + return cmd +} diff --git a/x/stakers/client/cli/query_params.go b/x/stakers/client/cli/query_params.go new file mode 100644 index 00000000..1af100d4 --- /dev/null +++ b/x/stakers/client/cli/query_params.go @@ -0,0 +1,34 @@ +package cli + +import ( + "context" + + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/spf13/cobra" +) + +func CmdQueryParams() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "shows the parameters of the module", + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + queryClient := types.NewQueryClient(clientCtx) + + res, err := queryClient.Params(context.Background(), &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/client/cli/tx.go b/x/stakers/client/cli/tx.go new file mode 100644 index 00000000..f0420583 --- /dev/null +++ b/x/stakers/client/cli/tx.go @@ -0,0 +1,29 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + 
"github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdCreateStaker()) + cmd.AddCommand(CmdJoinPool()) + cmd.AddCommand(CmdLeavePool()) + cmd.AddCommand(CmdUpdateCommission()) + cmd.AddCommand(CmdUpdateMetadata()) + + return cmd +} diff --git a/x/stakers/client/cli/tx_join_pool.go b/x/stakers/client/cli/tx_join_pool.go new file mode 100644 index 00000000..c69669bf --- /dev/null +++ b/x/stakers/client/cli/tx_join_pool.go @@ -0,0 +1,53 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdJoinPool() *cobra.Command { + cmd := &cobra.Command{ + Use: "join-pool [pool_id] [valaddress] [amount]", + Short: "Broadcast message join-pool", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argPoolId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argValaddress := args[1] + + argAmount, err := cast.ToUint64E(args[2]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgJoinPool{ + Creator: clientCtx.GetFromAddress().String(), + PoolId: argPoolId, + Valaddress: argValaddress, + Amount: argAmount, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/client/cli/tx_leave_pool.go b/x/stakers/client/cli/tx_leave_pool.go new file mode 100644 index 00000000..51175c72 --- /dev/null +++ b/x/stakers/client/cli/tx_leave_pool.go @@ -0,0 +1,44 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdLeavePool() *cobra.Command { + cmd := &cobra.Command{ + Use: "leave-pool [pool_id]", + Short: "Broadcast message leave-pool", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argPoolId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgLeavePool{ + Creator: clientCtx.GetFromAddress().String(), + PoolId: argPoolId, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/client/cli/tx_stake.go b/x/stakers/client/cli/tx_stake.go new file mode 100644 index 00000000..310dab85 --- /dev/null +++ b/x/stakers/client/cli/tx_stake.go @@ -0,0 +1,44 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) 
+ +func CmdCreateStaker() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-staker [amount]", + Short: "Broadcast message create-staker", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argAmount, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgCreateStaker{ + Creator: clientCtx.GetFromAddress().String(), + Amount: argAmount, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/client/cli/tx_update_commission.go b/x/stakers/client/cli/tx_update_commission.go new file mode 100644 index 00000000..4bf925a5 --- /dev/null +++ b/x/stakers/client/cli/tx_update_commission.go @@ -0,0 +1,38 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +func CmdUpdateCommission() *cobra.Command { + cmd := &cobra.Command{ + Use: "update-commission [commission]", + Short: "Broadcast message update-commission", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgUpdateCommission{ + Creator: clientCtx.GetFromAddress().String(), + Commission: args[0], + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/client/cli/tx_update_metadata.go b/x/stakers/client/cli/tx_update_metadata.go new file mode 100644 index 00000000..0fce70f3 --- /dev/null +++ b/x/stakers/client/cli/tx_update_metadata.go @@ -0,0 +1,40 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cobra" +) + +func CmdUpdateMetadata() *cobra.Command { + cmd := &cobra.Command{ + Use: "update-metadata [moniker] [website] [logo]", + Short: "Broadcast message update-metadata", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgUpdateMetadata{ + Creator: clientCtx.GetFromAddress().String(), + Moniker: args[0], + Website: args[1], + Logo: args[2], + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/stakers/genesis.go b/x/stakers/genesis.go new file mode 100644 index 00000000..b06e790f --- /dev/null +++ b/x/stakers/genesis.go @@ -0,0 +1,54 @@ +package stakers + +import ( + "github.com/KYVENetwork/chain/x/stakers/keeper" + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the capability module's state from a provided genesis +// state. 
+func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) + + for _, staker := range genState.StakerList { + k.AppendStaker(ctx, staker) + } + + for _, entry := range genState.ValaccountList { + k.SetValaccount(ctx, entry) + k.AddOneToCount(ctx, entry.PoolId) + k.AddActiveStaker(ctx, entry.Staker) + } + + for _, entry := range genState.CommissionChangeEntries { + k.SetCommissionChangeEntry(ctx, entry) + } + + for _, entry := range genState.LeavePoolEntries { + k.SetLeavePoolEntry(ctx, entry) + } + + k.SetQueueState(ctx, types.QUEUE_IDENTIFIER_COMMISSION, genState.QueueStateCommission) + k.SetQueueState(ctx, types.QUEUE_IDENTIFIER_LEAVE, genState.QueueStateLeave) +} + +// ExportGenesis returns the capability module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + genesis.Params = k.GetParams(ctx) + + genesis.StakerList = k.GetAllStakers(ctx) + + genesis.ValaccountList = k.GetAllValaccounts(ctx) + + genesis.CommissionChangeEntries = k.GetAllCommissionChangeEntries(ctx) + + genesis.LeavePoolEntries = k.GetAllLeavePoolEntries(ctx) + + genesis.QueueStateCommission = k.GetQueueState(ctx, types.QUEUE_IDENTIFIER_COMMISSION) + + genesis.QueueStateLeave = k.GetQueueState(ctx, types.QUEUE_IDENTIFIER_LEAVE) + + return genesis +} diff --git a/x/stakers/keeper/exported_functions.go b/x/stakers/keeper/exported_functions.go new file mode 100644 index 00000000..d53765e6 --- /dev/null +++ b/x/stakers/keeper/exported_functions.go @@ -0,0 +1,132 @@ +package keeper + +import ( + "cosmossdk.io/math" + "github.com/KYVENetwork/chain/util" + sdk "github.com/cosmos/cosmos-sdk/types" + + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Stakers + "github.com/KYVENetwork/chain/x/stakers/types" +) + +// These functions are meant to be called from external modules. +// For now this is the bundles module and the delegation module +// which need to interact with the stakers module. + +// LeavePool removes a staker from a pool and emits the corresponding event. +// The staker is no longer able to participate in the given pool. +// All points the staker had in that pool are deleted. +func (k Keeper) LeavePool(ctx sdk.Context, staker string, poolId uint64) { + k.RemoveValaccountFromPool(ctx, poolId, staker) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventLeavePool{ + PoolId: poolId, + Staker: staker, + }) +} + +// GetAllStakerAddressesOfPool returns a list of all stakers +// which have currently a valaccount registered for the given pool +// and are therefore allowed to participate in that pool. +func (k Keeper) GetAllStakerAddressesOfPool(ctx sdk.Context, poolId uint64) (stakers []string) { + for _, valaccount := range k.GetAllValaccountsOfPool(ctx, poolId) { + stakers = append(stakers, valaccount.Staker) + } + + return stakers +} + +// GetCommission returns the commission of a staker as a parsed sdk.Dec +func (k Keeper) GetCommission(ctx sdk.Context, stakerAddress string) sdk.Dec { + staker, _ := k.GetStaker(ctx, stakerAddress) + uploaderCommission, err := sdk.NewDecFromStr(staker.Commission) + if err != nil { + util.PanicHalt(k.upgradeKeeper, ctx, "Commission not parsable: "+staker.Commission) + } + return uploaderCommission +} + +// AssertValaccountAuthorized checks if the given `valaddress` is allowed to vote in pool +// with id `poolId` to vote in favor of `stakerAddress`. +// If the valaddress is not authorized the appropriate error is returned. 
+// Otherwise, it returns `nil` +func (k Keeper) AssertValaccountAuthorized(ctx sdk.Context, poolId uint64, stakerAddress string, valaddress string) error { + valaccount, found := k.GetValaccount(ctx, poolId, stakerAddress) + + if !found { + return types.ErrValaccountUnauthorized + } + + if valaccount.Valaddress != valaddress { + return types.ErrValaccountUnauthorized + } + + return nil +} + +// GetActiveStakers returns all staker-addresses that are +// currently participating in at least one pool. +func (k Keeper) GetActiveStakers(ctx sdk.Context) []string { + return k.getAllActiveStakers(ctx) +} + +// GOVERNANCE - BONDING +// The next functions are used in our custom fork of the cosmos-sdk +// which includes protocol staking into the governance. +// The behavior is exactly the same as with normal cosmos-validators. + +// TotalBondedTokens returns all tokens which are currently bonded by the protocol +// I.e. the sum of all delegation of all stakers that are currently participating +// in at least one pool +func (k Keeper) TotalBondedTokens(ctx sdk.Context) math.Int { + bondedTokens := math.ZeroInt() + + for _, validator := range k.getAllActiveStakers(ctx) { + delegation := int64(k.delegationKeeper.GetDelegationAmount(ctx, validator)) + + bondedTokens = bondedTokens.Add(math.NewInt(delegation)) + } + + return bondedTokens +} + +// GetActiveValidators returns all protocol-node information which +// are needed by the governance to calculate the voting powers. +// The interface needs to correspond to github.com/cosmos/cosmos-sdk/x/gov/types/v1.ValidatorGovInfo +// But as there is no direct dependency in the cosmos-sdk-fork this value is passed as an interface{} +func (k Keeper) GetActiveValidators(ctx sdk.Context) (validators []interface{}) { + for _, address := range k.getAllActiveStakers(ctx) { + delegation := int64(k.delegationKeeper.GetDelegationAmount(ctx, address)) + + validator := govV1Types.NewValidatorGovInfo( + sdk.ValAddress(sdk.MustAccAddressFromBech32(address)), + math.NewInt(delegation), + sdk.NewDec(delegation), + sdk.ZeroDec(), + govV1Types.WeightedVoteOptions{}, + ) + + validators = append(validators, validator) + } + + return +} + +// GetDelegations returns the address and the delegation amount of all active protocol-stakers the +// delegator as delegated to. This is used to calculate the vote weight each delegator has. +func (k Keeper) GetDelegations(ctx sdk.Context, delegator string) (validators []string, amounts []sdk.Dec) { + for _, validator := range k.delegationKeeper.GetStakersByDelegator(ctx, delegator) { + if k.isActiveStaker(ctx, validator) { + validators = append(validators, validator) + + amounts = append( + amounts, + sdk.NewDec(int64(k.delegationKeeper.GetDelegationAmountOfDelegator(ctx, validator, delegator))), + ) + } + } + + return +} diff --git a/x/stakers/keeper/exported_functions_test.go b/x/stakers/keeper/exported_functions_test.go new file mode 100644 index 00000000..62e97845 --- /dev/null +++ b/x/stakers/keeper/exported_functions_test.go @@ -0,0 +1,176 @@ +package keeper_test + +import ( + "strconv" + + kyveApp "github.com/KYVENetwork/chain/app" + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + // Delegation + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Pool + poolTypes "github.com/KYVENetwork/chain/x/pool/types" + // Stakers + stakersTypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - Protocol Governance Voting + +* Protocol validator doesn't vote, delegator votes. +* Protocol validator votes, delegator doesn't. +* Protocol validator votes, delegator votes the same. +* Protocol validator votes, delegator votes different. + +*/ + +var _ = Describe("Protocol Governance Voting", Ordered, func() { + s := i.NewCleanChain() + + parsedAliceAddr := sdk.MustAccAddressFromBech32(i.ALICE) + parsedBobAddr := sdk.MustAccAddressFromBech32(i.BOB) + + // TODO(postAudit,@john): Think about randomly generating these values. + validatorAmount := 500 * i.KYVE + delegatorAmount := 250 * i.KYVE + + BeforeEach(func() { + s = i.NewCleanChain() + + // Create a test proposal. + proposeTx := CreateTestProposal(s.Ctx(), s.App().Keepers) + _ = s.RunTxSuccess(proposeTx) + + // Initialise a protocol validator. + createTx := &stakersTypes.MsgCreateStaker{ + Creator: i.ALICE, + Amount: validatorAmount, + } + _ = s.RunTxSuccess(createTx) + + // Create and join a pool. + s.App().PoolKeeper.AppendPool(s.Ctx(), poolTypes.Pool{ + Name: "Cosmos Hub", + Protocol: &poolTypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &poolTypes.UpgradePlan{}, + }) + + joinTx := &stakersTypes.MsgJoinPool{ + Creator: i.ALICE, + PoolId: 0, + Valaddress: i.DUMMY[0], + Amount: validatorAmount, + } + _ = s.RunTxSuccess(joinTx) + + // Delegate to protocol validator. + delegateTx := &delegationTypes.MsgDelegate{ + Creator: i.BOB, + Staker: i.ALICE, + Amount: delegatorAmount, + } + _ = s.RunTxSuccess(delegateTx) + + Expect(s.App().StakersKeeper.TotalBondedTokens(s.Ctx()).Uint64()).To(Equal(delegatorAmount + validatorAmount)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Protocol validator doesn't vote, delegator votes.", func() { + // ARRANGE + delegatorTx := govTypes.NewMsgVote( + parsedBobAddr, 1, govTypes.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _ = s.RunTxSuccess(delegatorTx) + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + _, _, tally := s.App().GovKeeper.Tally(s.Ctx(), proposal) + + Expect(tally.YesCount).To(Equal(strconv.Itoa(int(delegatorAmount)))) + }) + + It("Protocol validator votes, delegator doesn't.", func() { + // ARRANGE + validatorTx := govTypes.NewMsgVote( + parsedAliceAddr, 1, govTypes.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _ = s.RunTxSuccess(validatorTx) + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + _, _, tally := s.App().GovKeeper.Tally(s.Ctx(), proposal) + + Expect(tally.YesCount).To(Equal(strconv.Itoa(int(delegatorAmount + validatorAmount)))) + }) + + It("Protocol validator votes, delegator votes the same.", func() { + // ARRANGE + validatorTx := govTypes.NewMsgVote( + parsedAliceAddr, 1, govTypes.VoteOption_VOTE_OPTION_YES, "", + ) + + delegatorTx := govTypes.NewMsgVote( + parsedBobAddr, 1, govTypes.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _ = s.RunTxSuccess(validatorTx) + _ = s.RunTxSuccess(delegatorTx) + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + _, _, tally := s.App().GovKeeper.Tally(s.Ctx(), proposal) + + 
Expect(tally.YesCount).To(Equal(strconv.Itoa(int(validatorAmount + delegatorAmount)))) + }) + + It("Protocol validator votes, delegator votes different.", func() { + // ARRANGE + validatorTx := govTypes.NewMsgVote( + parsedAliceAddr, 1, govTypes.VoteOption_VOTE_OPTION_YES, "", + ) + + delegatorTx := govTypes.NewMsgVote( + parsedBobAddr, 1, govTypes.VoteOption_VOTE_OPTION_NO, "", + ) + + // ACT + _ = s.RunTxSuccess(validatorTx) + _ = s.RunTxSuccess(delegatorTx) + + // ASSERT + proposal, _ := s.App().GovKeeper.GetProposal(s.Ctx(), 1) + _, _, tally := s.App().GovKeeper.Tally(s.Ctx(), proposal) + + Expect(tally.YesCount).To(Equal(strconv.Itoa(int(validatorAmount)))) + Expect(tally.NoCount).To(Equal(strconv.Itoa(int(delegatorAmount)))) + }) +}) + +func CreateTestProposal(ctx sdk.Context, keepers kyveApp.Keepers) sdk.Msg { + minDeposit := keepers.GovKeeper.GetDepositParams(ctx).MinDeposit + + proposal, _ := govTypes.NewMsgSubmitProposal( + []sdk.Msg{}, minDeposit, i.DUMMY[0], "ipfs://CID", + ) + + return proposal +} diff --git a/x/stakers/keeper/getters_commission.go b/x/stakers/keeper/getters_commission.go new file mode 100644 index 00000000..ad1a0103 --- /dev/null +++ b/x/stakers/keeper/getters_commission.go @@ -0,0 +1,77 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetCommissionChangeEntry ... +func (k Keeper) SetCommissionChangeEntry(ctx sdk.Context, commissionChangeEntry types.CommissionChangeEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefix) + b := k.cdc.MustMarshal(&commissionChangeEntry) + store.Set(types.CommissionChangeEntryKey(commissionChangeEntry.Index), b) + + // Insert the same entry with a different key prefix for query lookup + indexBytes := make([]byte, 8) + binary.BigEndian.PutUint64(indexBytes, commissionChangeEntry.Index) + + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefixIndex2) + indexStore.Set(types.CommissionChangeEntryKeyIndex2(commissionChangeEntry.Staker), indexBytes) +} + +// GetCommissionChangeEntry ... +func (k Keeper) GetCommissionChangeEntry(ctx sdk.Context, index uint64) (val types.CommissionChangeEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefix) + + b := store.Get(types.CommissionChangeEntryKey(index)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetCommissionChangeEntryByIndex2 returns a pending commission change entry by staker address (if there is one) +func (k Keeper) GetCommissionChangeEntryByIndex2(ctx sdk.Context, staker string) (val types.CommissionChangeEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefixIndex2) + + b := store.Get(types.CommissionChangeEntryKeyIndex2(staker)) + if b == nil { + return val, false + } + + index := binary.BigEndian.Uint64(b) + + return k.GetCommissionChangeEntry(ctx, index) +} + +// RemoveCommissionChangeEntry ... 
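+// It deletes the entry from the primary store and also removes the matching
+// staker-address index entry, so GetCommissionChangeEntryByIndex2 will no
+// longer find it.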
+func (k Keeper) RemoveCommissionChangeEntry(ctx sdk.Context, commissionChangeEntry *types.CommissionChangeEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefix) + store.Delete(types.CommissionChangeEntryKey(commissionChangeEntry.Index)) + + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefixIndex2) + indexStore.Delete(types.CommissionChangeEntryKeyIndex2( + commissionChangeEntry.Staker, + )) +} + +// GetAllCommissionChangeEntries returns all pending commission change entries of all stakers +func (k Keeper) GetAllCommissionChangeEntries(ctx sdk.Context) (list []types.CommissionChangeEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.CommissionChangeEntryKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.CommissionChangeEntry + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/stakers/keeper/getters_leave.go b/x/stakers/keeper/getters_leave.go new file mode 100644 index 00000000..9815127f --- /dev/null +++ b/x/stakers/keeper/getters_leave.go @@ -0,0 +1,88 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ##################### +// === QUEUE ENTRIES === +// ##################### + +// SetLeavePoolEntry ... +func (k Keeper) SetLeavePoolEntry(ctx sdk.Context, leavePoolEntry types.LeavePoolEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefix) + b := k.cdc.MustMarshal(&leavePoolEntry) + store.Set(types.LeavePoolEntryKey( + leavePoolEntry.Index, + ), b) + + // Insert the same entry with a different key prefix for query lookup + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefixIndex2) + indexStore.Set(types.LeavePoolEntryKeyIndex2( + leavePoolEntry.Staker, + leavePoolEntry.PoolId, + ), []byte{1}) +} + +// GetLeavePoolEntry ... +func (k Keeper) GetLeavePoolEntry(ctx sdk.Context, index uint64) (val types.LeavePoolEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefix) + + b := store.Get(types.LeavePoolEntryKey(index)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetLeavePoolEntryByIndex2 ... +func (k Keeper) GetLeavePoolEntryByIndex2(ctx sdk.Context, staker string, poolId uint64) (val types.LeavePoolEntry, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefixIndex2) + + b := store.Get(types.LeavePoolEntryKeyIndex2(staker, poolId)) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// DoesLeavePoolEntryExistByIndex2 ... +func (k Keeper) DoesLeavePoolEntryExistByIndex2(ctx sdk.Context, staker string, poolId uint64) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefixIndex2) + + return store.Has(types.LeavePoolEntryKeyIndex2(staker, poolId)) +} + +// RemoveLeavePoolEntry ... 
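+// It deletes the entry from the primary store and also removes the
+// (staker, poolId) index entry, so DoesLeavePoolEntryExistByIndex2 reports
+// false afterwards.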
+func (k Keeper) RemoveLeavePoolEntry(ctx sdk.Context, leavePoolEntry *types.LeavePoolEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefix) + store.Delete(types.LeavePoolEntryKey(leavePoolEntry.Index)) + + indexStore := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefixIndex2) + indexStore.Delete(types.LeavePoolEntryKeyIndex2( + leavePoolEntry.Staker, + leavePoolEntry.PoolId, + )) +} + +// GetAllLeavePoolEntries ... +func (k Keeper) GetAllLeavePoolEntries(ctx sdk.Context) (list []types.LeavePoolEntry) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.LeavePoolEntryKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.LeavePoolEntry + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/stakers/keeper/getters_params.go b/x/stakers/keeper/getters_params.go new file mode 100644 index 00000000..584f56a5 --- /dev/null +++ b/x/stakers/keeper/getters_params.go @@ -0,0 +1,36 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetParams returns the current x/stakers module parameters. +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + store := ctx.KVStore(k.storeKey) + + bz := store.Get(types.ParamsKey) + if bz == nil { + return params + } + + k.cdc.MustUnmarshal(bz, ¶ms) + return params +} + +// GetCommissionChangeTime returns the CommissionChangeTime param +func (k Keeper) GetCommissionChangeTime(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).CommissionChangeTime +} + +// GetLeavePoolTime returns the LeavePoolTime param +func (k Keeper) GetLeavePoolTime(ctx sdk.Context) (res uint64) { + return k.GetParams(ctx).LeavePoolTime +} + +// SetParams sets the x/stakers module parameters. +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(¶ms) + store.Set(types.ParamsKey, bz) +} diff --git a/x/stakers/keeper/getters_queue.go b/x/stakers/keeper/getters_queue.go new file mode 100644 index 00000000..e7315917 --- /dev/null +++ b/x/stakers/keeper/getters_queue.go @@ -0,0 +1,29 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetQueueState returns a queue state object based on the identifier as +// there are multiple queues present in the stakers module +func (k Keeper) GetQueueState(ctx sdk.Context, identifier types.QUEUE_IDENTIFIER) (state types.QueueState) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + b := store.Get(identifier) + + if b == nil { + return state + } + + k.cdc.MustUnmarshal(b, &state) + return +} + +// SetQueueState sets a endBlocker queue state based on the identifier. +// The identifier is used to distinguish between different queues. 
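+// Within this module two queues are used: QUEUE_IDENTIFIER_COMMISSION for
+// pending commission changes and QUEUE_IDENTIFIER_LEAVE for pending pool leaves.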
+func (k Keeper) SetQueueState(ctx sdk.Context, identifier types.QUEUE_IDENTIFIER, state types.QueueState) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + b := k.cdc.MustMarshal(&state) + store.Set(identifier, b) +} diff --git a/x/stakers/keeper/getters_staker.go b/x/stakers/keeper/getters_staker.go new file mode 100644 index 00000000..32d7641e --- /dev/null +++ b/x/stakers/keeper/getters_staker.go @@ -0,0 +1,268 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/query" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// UpdateStakerMetadata ... +func (k Keeper) UpdateStakerMetadata(ctx sdk.Context, address string, moniker string, website string, logo string) { + staker, found := k.GetStaker(ctx, address) + if found { + staker.Moniker = moniker + staker.Website = website + staker.Logo = logo + k.setStaker(ctx, staker) + } +} + +// UpdateStakerCommission ... +func (k Keeper) UpdateStakerCommission(ctx sdk.Context, address string, commission string) { + staker, found := k.GetStaker(ctx, address) + if found { + staker.Commission = commission + k.setStaker(ctx, staker) + } +} + +// AddValaccountToPool adds a valaccount to a pool. +// If valaccount already belongs to pool, nothing happens. +func (k Keeper) AddValaccountToPool(ctx sdk.Context, poolId uint64, stakerAddress string, valaddress string) { + if k.DoesStakerExist(ctx, stakerAddress) { + if !k.DoesValaccountExist(ctx, poolId, stakerAddress) { + k.SetValaccount(ctx, types.Valaccount{ + PoolId: poolId, + Staker: stakerAddress, + Valaddress: valaddress, + }) + k.AddOneToCount(ctx, poolId) + k.AddActiveStaker(ctx, stakerAddress) + } + } +} + +// RemoveValaccountFromPool removes a valaccount from a given pool and updates +// all aggregated variables. If the valaccount is not in the pool nothing happens. 
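+// Besides deleting the valaccount itself, the per-pool staker count and the
+// active-staker index are decremented accordingly.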
+func (k Keeper) RemoveValaccountFromPool(ctx sdk.Context, poolId uint64, stakerAddress string) { + // get valaccount + valaccount, valaccountFound := k.GetValaccount(ctx, poolId, stakerAddress) + + // if valaccount was found on pool continue + if valaccountFound { + // remove valaccount from pool + k.removeValaccount(ctx, valaccount) + k.subtractOneFromCount(ctx, poolId) + k.removeActiveStaker(ctx, stakerAddress) + } +} + +func (k Keeper) AppendStaker(ctx sdk.Context, staker types.Staker) { + k.setStaker(ctx, staker) +} + +// ############################# +// # Raw KV-Store operations # +// ############################# + +func (k Keeper) getAllStakersOfPool(ctx sdk.Context, poolId uint64) []types.Staker { + valaccounts := k.GetAllValaccountsOfPool(ctx, poolId) + + stakers := make([]types.Staker, 0) + + for _, valaccount := range valaccounts { + staker, _ := k.GetStaker(ctx, valaccount.Staker) + stakers = append(stakers, staker) + } + + return stakers +} + +// setStaker set a specific staker in the store from its index +func (k Keeper) setStaker(ctx sdk.Context, staker types.Staker) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.StakerKeyPrefix) + b := k.cdc.MustMarshal(&staker) + store.Set(types.StakerKey( + staker.Address, + ), b) +} + +// DoesStakerExist returns true if the staker exists +func (k Keeper) DoesStakerExist(ctx sdk.Context, staker string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.StakerKeyPrefix) + return store.Has(types.StakerKey(staker)) +} + +// GetStaker returns a staker from its index +func (k Keeper) GetStaker( + ctx sdk.Context, + staker string, +) (val types.Staker, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.StakerKeyPrefix) + + b := store.Get(types.StakerKey( + staker, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +func (k Keeper) GetPaginatedStakerQuery(ctx sdk.Context, pagination *query.PageRequest, accumulator func(staker types.Staker)) (*query.PageResponse, error) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.StakerKeyPrefix) + + pageRes, err := query.FilteredPaginate(store, pagination, func(key []byte, value []byte, accumulate bool) (bool, error) { + if accumulate { + var staker types.Staker + if err := k.cdc.Unmarshal(value, &staker); err != nil { + return false, err + } + accumulator(staker) + } + + return true, nil + }) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + + return pageRes, nil +} + +// GetAllStakers returns all staker +func (k Keeper) GetAllStakers(ctx sdk.Context) (list []types.Staker) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.StakerKeyPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Staker + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} + +// ############################# +// # Aggregation Data # +// ############################# + +func (k Keeper) GetStakerCountOfPool(ctx sdk.Context, poolId uint64) uint64 { + return k.getStat(ctx, poolId, types.STAKER_STATS_COUNT) +} + +func (k Keeper) AddOneToCount(ctx sdk.Context, poolId uint64) { + count := k.getStat(ctx, poolId, types.STAKER_STATS_COUNT) + k.setStat(ctx, poolId, types.STAKER_STATS_COUNT, count+1) +} + +func (k Keeper) subtractOneFromCount(ctx sdk.Context, poolId uint64) { + count := k.getStat(ctx, poolId, types.STAKER_STATS_COUNT) + k.setStat(ctx, poolId, 
types.STAKER_STATS_COUNT, count-1) +} + +// getStat get the total number of pool +func (k Keeper) getStat(ctx sdk.Context, poolId uint64, statType types.STAKER_STATS) uint64 { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + bz := store.Get(util.GetByteKey(string(statType), poolId)) + if bz == nil { + return 0 + } + return binary.BigEndian.Uint64(bz) +} + +// setStat set the total number of pool +func (k Keeper) setStat(ctx sdk.Context, poolId uint64, statType types.STAKER_STATS, count uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + bz := make([]byte, 8) + binary.BigEndian.PutUint64(bz, count) + store.Set(util.GetByteKey(string(statType), poolId), bz) +} + +// ############################# +// # Active Staker # +// ############################# +// Active Staker stores all stakers that are at least in one pool + +func (k Keeper) isActiveStaker(ctx sdk.Context, staker string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ActiveStakerIndex) + return store.Has(types.ActiveStakerKeyIndex(staker)) +} + +// AddActiveStaker increases the active-staker-count of the given staker by one. +// The amount tracks the number of pools the staker is in. It also allows +// to determine that a given staker is at least in one pool. +func (k Keeper) AddActiveStaker(ctx sdk.Context, staker string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ActiveStakerIndex) + // Get current count + count := uint64(0) + storeBytes := store.Get(types.ActiveStakerKeyIndex(staker)) + bytes := make([]byte, 8) + copy(bytes, storeBytes) + if len(bytes) == 8 { + count = binary.BigEndian.Uint64(bytes) + } else { + bytes = make([]byte, 8) + } + // Count represents in how many pools the current staker is active + count += 1 + + // Encode and store + binary.BigEndian.PutUint64(bytes, count) + store.Set(types.ActiveStakerKeyIndex(staker), bytes) +} + +// removeActiveStaker decrements the active-staker-count of the given staker +// by one. If the amount drop to zero the staker is removed from the set. +// Therefore, one can be sure, that only stakers which are participating in +// at least one pool are part of the set +func (k Keeper) removeActiveStaker(ctx sdk.Context, staker string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ActiveStakerIndex) + // Get current count + count := uint64(0) + storeBytes := store.Get(types.ActiveStakerKeyIndex(staker)) + bytes := make([]byte, 8) + copy(bytes, storeBytes) + + if len(bytes) == 8 { + count = binary.BigEndian.Uint64(bytes) + } else { + bytes = make([]byte, 8) + } + + if count == 0 || count == 1 { + store.Delete(types.ActiveStakerKeyIndex(staker)) + return + } + + // Count represents in how many pools the current staker is active + count -= 1 + + // Encode and store + binary.BigEndian.PutUint64(bytes, count) + store.Set(types.ActiveStakerKeyIndex(staker), bytes) +} + +// getAllActiveStakers returns all active stakers, i.e. every staker +// that is member of at least one pool. 
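+// The addresses are read directly from the keys of the active-staker index,
+// so no values have to be unmarshalled.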
+func (k Keeper) getAllActiveStakers(ctx sdk.Context) (list []string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ActiveStakerIndex) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + list = append(list, string(iterator.Key())) + } + + return +} diff --git a/x/stakers/keeper/getters_valaccount.go b/x/stakers/keeper/getters_valaccount.go new file mode 100644 index 00000000..51fbe14d --- /dev/null +++ b/x/stakers/keeper/getters_valaccount.go @@ -0,0 +1,154 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// IncrementPoints increments to Points for a staker in a given pool. +// Returns the amount of the current points (including the current incrementation) +func (k Keeper) IncrementPoints(ctx sdk.Context, poolId uint64, stakerAddress string) uint64 { + valaccount, found := k.GetValaccount(ctx, poolId, stakerAddress) + if found { + valaccount.Points += 1 + k.SetValaccount(ctx, valaccount) + } + return valaccount.Points +} + +// ResetPoints sets the point count for the staker in the given pool back to zero. +// Returns the amount of points the staker had before the reset. +func (k Keeper) ResetPoints(ctx sdk.Context, poolId uint64, stakerAddress string) (previousPoints uint64) { + valaccount, found := k.GetValaccount(ctx, poolId, stakerAddress) + if found { + previousPoints = valaccount.Points + valaccount.Points = 0 + k.SetValaccount(ctx, valaccount) + } + return +} + +// GetAllValaccountsOfPool returns a list of all valaccount +func (k Keeper) GetAllValaccountsOfPool(ctx sdk.Context, poolId uint64) (val []*types.Valaccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + + iterator := sdk.KVStorePrefixIterator(store, util.GetByteKey(poolId)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + valaccount := types.Valaccount{} + k.cdc.MustUnmarshal(iterator.Value(), &valaccount) + val = append(val, &valaccount) + } + + return +} + +// GetValaccountsFromStaker returns all pools the staker has valaccounts in +func (k Keeper) GetValaccountsFromStaker(ctx sdk.Context, stakerAddress string) (val []*types.Valaccount) { + storeIndex2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefixIndex2) + + iterator := sdk.KVStorePrefixIterator(storeIndex2, util.GetByteKey(stakerAddress)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + poolId := binary.BigEndian.Uint64(iterator.Key()[43 : 43+8]) + valaccount, valaccountFound := k.GetValaccount(ctx, poolId, stakerAddress) + + if valaccountFound { + val = append(val, &valaccount) + } + } + + return val +} + +// GetPoolCount returns the number of pools the current staker is +// currently participating. 
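+// The count is determined by iterating over the staker's valaccount index entries.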
+func (k Keeper) GetPoolCount(ctx sdk.Context, stakerAddress string) (poolCount uint64) { + storeIndex2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefixIndex2) + iterator := sdk.KVStorePrefixIterator(storeIndex2, util.GetByteKey(stakerAddress)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + poolCount += 1 + } + return +} + +// ############################# +// # Raw KV-Store operations # +// ############################# + +// DoesValaccountExist only checks if the key is present in the KV-Store +// without loading and unmarshalling to full entry +func (k Keeper) DoesValaccountExist(ctx sdk.Context, poolId uint64, stakerAddress string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + return store.Has(types.ValaccountKey(poolId, stakerAddress)) +} + +// SetValaccount set a specific Valaccount in the store from its index +func (k Keeper) SetValaccount(ctx sdk.Context, valaccount types.Valaccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + b := k.cdc.MustMarshal(&valaccount) + store.Set(types.ValaccountKey( + valaccount.PoolId, + valaccount.Staker, + ), b) + + storeIndex2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefixIndex2) + storeIndex2.Set(types.ValaccountKeyIndex2( + valaccount.Staker, + valaccount.PoolId, + ), []byte{}) +} + +// removeValaccount removes a Valaccount from the store +func (k Keeper) removeValaccount(ctx sdk.Context, valaccount types.Valaccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + store.Delete(types.ValaccountKey( + valaccount.PoolId, + valaccount.Staker, + )) + + storeIndex2 := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefixIndex2) + storeIndex2.Delete(types.ValaccountKeyIndex2( + valaccount.Staker, + valaccount.PoolId, + )) +} + +// GetValaccount returns a Valaccount from its index +func (k Keeper) GetValaccount(ctx sdk.Context, poolId uint64, stakerAddress string) (val types.Valaccount, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + + b := store.Get(types.ValaccountKey( + poolId, + stakerAddress, + )) + if b == nil { + return val, false + } + + k.cdc.MustUnmarshal(b, &val) + return val, true +} + +// GetAllValaccounts ... 
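+// It returns every valaccount of every pool, e.g. for exporting the module's
+// genesis state.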
+func (k Keeper) GetAllValaccounts(ctx sdk.Context) (list []types.Valaccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.ValaccountPrefix) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var val types.Valaccount + k.cdc.MustUnmarshal(iterator.Value(), &val) + list = append(list, val) + } + + return +} diff --git a/x/stakers/keeper/grpc_query.go b/x/stakers/keeper/grpc_query.go new file mode 100644 index 00000000..763c25ec --- /dev/null +++ b/x/stakers/keeper/grpc_query.go @@ -0,0 +1,21 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var _ types.QueryServer = Keeper{} + +func (k Keeper) Params(c context.Context, req *types.QueryParamsRequest) (*types.QueryParamsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil +} diff --git a/x/stakers/keeper/keeper.go b/x/stakers/keeper/keeper.go new file mode 100644 index 00000000..7e8dabe9 --- /dev/null +++ b/x/stakers/keeper/keeper.go @@ -0,0 +1,70 @@ +package keeper + +import ( + "fmt" + + delegationKeeper "github.com/KYVENetwork/chain/x/delegation/keeper" + "github.com/tendermint/tendermint/libs/log" + + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + memKey storetypes.StoreKey + + authority string + + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper + distrkeeper types.DistrKeeper + poolKeeper types.PoolKeeper + upgradeKeeper types.UpgradeKeeper + delegationKeeper delegationKeeper.Keeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storetypes.StoreKey, + memKey storetypes.StoreKey, + + authority string, + + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, + distrkeeper types.DistrKeeper, + poolKeeper types.PoolKeeper, + upgradeKeeper types.UpgradeKeeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + memKey: memKey, + + authority: authority, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + distrkeeper: distrkeeper, + poolKeeper: poolKeeper, + upgradeKeeper: upgradeKeeper, + } +} + +func SetDelegationKeeper(k *Keeper, delegationKeeper delegationKeeper.Keeper) { + k.delegationKeeper = delegationKeeper +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func (k Keeper) StoreKey() storetypes.StoreKey { + return k.storeKey +} diff --git a/x/stakers/keeper/keeper_suite_test.go b/x/stakers/keeper/keeper_suite_test.go new file mode 100644 index 00000000..ff277872 --- /dev/null +++ b/x/stakers/keeper/keeper_suite_test.go @@ -0,0 +1,16 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/stakers/types" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +func TestBundlesKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} diff --git a/x/stakers/keeper/logic_commission.go b/x/stakers/keeper/logic_commission.go new file mode 100644 index 00000000..1d888adc --- /dev/null +++ b/x/stakers/keeper/logic_commission.go @@ -0,0 +1,60 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// orderNewCommissionChange inserts a new change entry into the queue. +// The queue is checked in every endBlock and when the commissionChangeTime +// is over the new commission will be applied to the user. +// If another entry is currently in the queue it will be removed. +func (k Keeper) orderNewCommissionChange(ctx sdk.Context, staker string, commission string) { + // Remove existing queue entry + queueEntry, found := k.GetCommissionChangeEntryByIndex2(ctx, staker) + if found { + k.RemoveCommissionChangeEntry(ctx, &queueEntry) + } + + queueIndex := k.getNextQueueSlot(ctx, types.QUEUE_IDENTIFIER_COMMISSION) + + commissionChangeEntry := types.CommissionChangeEntry{ + Index: queueIndex, + Staker: staker, + Commission: commission, + CreationDate: ctx.BlockTime().Unix(), + } + + k.SetCommissionChangeEntry(ctx, commissionChangeEntry) +} + +// ProcessCommissionChangeQueue checks the queue for entries which are due +// and can be executed. If this is the case, the new commission +// will be applied to the staker +func (k Keeper) ProcessCommissionChangeQueue(ctx sdk.Context) { + k.processQueue(ctx, types.QUEUE_IDENTIFIER_COMMISSION, func(index uint64) bool { + // Get queue entry in question + queueEntry, found := k.GetCommissionChangeEntry(ctx, index) + + if !found { + // continue with the next entry + return true + } else if queueEntry.CreationDate+int64(k.GetCommissionChangeTime(ctx)) <= ctx.BlockTime().Unix() { + + k.RemoveCommissionChangeEntry(ctx, &queueEntry) + + k.UpdateStakerCommission(ctx, queueEntry.Staker, queueEntry.Commission) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventUpdateCommission{ + Staker: queueEntry.Staker, + Commission: queueEntry.Commission, + }) + + // Continue with next entry + return true + } + + // Stop queue processing + return false + }) +} diff --git a/x/stakers/keeper/logic_leave.go b/x/stakers/keeper/logic_leave.go new file mode 100644 index 00000000..56d90f84 --- /dev/null +++ b/x/stakers/keeper/logic_leave.go @@ -0,0 +1,47 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k Keeper) orderLeavePool(ctx sdk.Context, staker string, poolId uint64) error { + // Remove existing queue entry + if k.DoesLeavePoolEntryExistByIndex2(ctx, staker, poolId) { + return sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrPoolLeaveAlreadyInProgress.Error()) + } + + queueIndex := k.getNextQueueSlot(ctx, types.QUEUE_IDENTIFIER_LEAVE) + + leavePoolEntry := types.LeavePoolEntry{ + Index: queueIndex, + Staker: staker, + PoolId: poolId, + CreationDate: ctx.BlockTime().Unix(), + } + + k.SetLeavePoolEntry(ctx, leavePoolEntry) + + return nil +} + +// ProcessLeavePoolQueue ... 
+func (k Keeper) ProcessLeavePoolQueue(ctx sdk.Context) { + k.processQueue(ctx, types.QUEUE_IDENTIFIER_LEAVE, func(index uint64) bool { + // Get queue entry in question + queueEntry, found := k.GetLeavePoolEntry(ctx, index) + + if !found { + // continue with the next entry + return true + } else if queueEntry.CreationDate+int64(k.GetLeavePoolTime(ctx)) <= ctx.BlockTime().Unix() { + + k.RemoveLeavePoolEntry(ctx, &queueEntry) + k.LeavePool(ctx, queueEntry.Staker, queueEntry.PoolId) + + return true + } + return false + }) +} diff --git a/x/stakers/keeper/logic_queue.go b/x/stakers/keeper/logic_queue.go new file mode 100644 index 00000000..a58d9e14 --- /dev/null +++ b/x/stakers/keeper/logic_queue.go @@ -0,0 +1,45 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// getNextQueueSlot inserts an entry into the queue identified by `identifier` +// It automatically updates the queue state and uses the block time. +func (k Keeper) getNextQueueSlot(ctx sdk.Context, identifier types.QUEUE_IDENTIFIER) (index uint64) { + // unbondingState stores the start and the end of the queue with all unbonding entries + // the queue is ordered by time + queueState := k.GetQueueState(ctx, identifier) + + // Increase topIndex as a new entry is about to be appended + queueState.HighIndex += 1 + + k.SetQueueState(ctx, identifier, queueState) + + return queueState.HighIndex +} + +// processQueue passes the tail of the queue to the `processEntry(...)`-function +// The processing continues as long as the function returns true. +func (k Keeper) processQueue(ctx sdk.Context, identifier types.QUEUE_IDENTIFIER, processEntry func(index uint64) bool) { + // Get Queue information + queueState := k.GetQueueState(ctx, identifier) + + // flag for computing every entry at the end of the queue which is due. + // start processing the end of the queue + for commissionChangePerformed := true; commissionChangePerformed; { + commissionChangePerformed = false + + entryRemoved := processEntry(queueState.LowIndex + 1) + + if entryRemoved { + if queueState.LowIndex < queueState.HighIndex { + queueState.LowIndex += 1 + commissionChangePerformed = true + } + } + + } + k.SetQueueState(ctx, identifier, queueState) +} diff --git a/x/stakers/keeper/logic_stakers.go b/x/stakers/keeper/logic_stakers.go new file mode 100644 index 00000000..9cba6b81 --- /dev/null +++ b/x/stakers/keeper/logic_stakers.go @@ -0,0 +1,55 @@ +package keeper + +import ( + "math" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// getLowestStaker returns the staker with the lowest total stake +// (self-delegation + delegation) of a given pool. +// If all pool slots are taken, this is the staker who then +// gets kicked out. +func (k Keeper) getLowestStaker(ctx sdk.Context, poolId uint64) (val types.Staker, found bool) { + var minAmount uint64 = math.MaxUint64 + + for _, staker := range k.getAllStakersOfPool(ctx, poolId) { + delegationAmount := k.delegationKeeper.GetDelegationAmount(ctx, staker.Address) + if delegationAmount < minAmount { + minAmount = delegationAmount + val = staker + } + } + + return +} + +// ensureFreeSlot makes sure that a staker can join a given pool. +// If this is not possible an appropriate error is returned. +// A pool has a fixed amount of slots. If there are still free slots +// a staker can just join (even with the smallest stake possible). 
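+// The number of slots per pool is defined by types.MaxStakers.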
+// If all slots are taken, it checks if the new staker has more stake +// than the current lowest staker in that pool. +// If so, the lowest staker gets removed from the pool, so that the +// new staker can join. +func (k Keeper) ensureFreeSlot(ctx sdk.Context, poolId uint64, stakerAddress string) error { + // check if slots are still available + if k.GetStakerCountOfPool(ctx, poolId) >= types.MaxStakers { + // if not - get lowest staker + lowestStaker, _ := k.getLowestStaker(ctx, poolId) + + // if new pool joiner has more stake than lowest staker kick him out + newAmount := k.delegationKeeper.GetDelegationAmount(ctx, stakerAddress) + lowestAmount := k.delegationKeeper.GetDelegationAmount(ctx, lowestStaker.Address) + if newAmount > lowestAmount { + // remove lowest staker from pool + k.LeavePool(ctx, lowestStaker.Address, poolId) + } else { + return sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrStakeTooLow.Error(), k.delegationKeeper.GetDelegationAmount(ctx, lowestStaker.Address)) + } + } + + return nil +} diff --git a/x/stakers/keeper/msg_server.go b/x/stakers/keeper/msg_server.go new file mode 100644 index 00000000..4aeae435 --- /dev/null +++ b/x/stakers/keeper/msg_server.go @@ -0,0 +1,17 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/stakers/types" +) + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/stakers/keeper/msg_server_create_staker.go b/x/stakers/keeper/msg_server_create_staker.go new file mode 100644 index 00000000..19599dea --- /dev/null +++ b/x/stakers/keeper/msg_server_create_staker.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "context" + + delegationKeeper "github.com/KYVENetwork/chain/x/delegation/keeper" + delegationTypes "github.com/KYVENetwork/chain/x/delegation/types" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// CreateStaker handles the logic of an SDK message that allows protocol nodes to create +// a staker with an initial self delegation. +// Every user can create a staker object with some stake. 
However, +// only if self_delegation + delegation is large enough to join a pool the staker +// is able to participate in the protocol +func (k msgServer) CreateStaker(goCtx context.Context, msg *types.MsgCreateStaker) (*types.MsgCreateStakerResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Only create new stakers + if k.DoesStakerExist(ctx, msg.Creator) { + return nil, types.ErrStakerAlreadyCreated + } + + // Create and append new staker to store + k.AppendStaker(ctx, types.Staker{ + Address: msg.Creator, + Commission: types.DefaultCommission, + }) + + // Perform initial self delegation + delegationMsgServer := delegationKeeper.NewMsgServerImpl(k.delegationKeeper) + if _, err := delegationMsgServer.Delegate(ctx, &delegationTypes.MsgDelegate{ + Creator: msg.Creator, + Staker: msg.Creator, + Amount: msg.Amount, + }); err != nil { + return nil, err + } + + _ = ctx.EventManager().EmitTypedEvent(&types.EventCreateStaker{ + Staker: msg.Creator, + Amount: msg.Amount, + }) + + return &types.MsgCreateStakerResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_create_staker_test.go b/x/stakers/keeper/msg_server_create_staker_test.go new file mode 100644 index 00000000..c8897310 --- /dev/null +++ b/x/stakers/keeper/msg_server_create_staker_test.go @@ -0,0 +1,193 @@ +package keeper_test + +import ( + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/stakers/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_create_staker.go + +* Create a first new staker and delegate 100 $KYVE +* Do an additional 50 $KYVE self delegation after staker has already delegated 100 $KYVE +* Try to create staker with more $KYVE than available in balance +* Create a second staker by staking 150 $KYVE +* Try to create a staker again + +*/ + +var _ = Describe("msg_server_create_staker.go", Ordered, func() { + s := i.NewCleanChain() + + initialBalance := s.GetBalanceFromAddress(i.STAKER_0) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Create a first new staker and delegate 100 $KYVE", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.STAKER_0) + + staker, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + valaccounts := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(initialBalance - balanceAfter).To(Equal(100 * i.KYVE)) + + Expect(staker.Address).To(Equal(i.STAKER_0)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(staker.Commission).To(Equal(types.DefaultCommission)) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + Expect(staker.Website).To(BeEmpty()) + + Expect(valaccounts).To(BeEmpty()) + }) + + It("Do an additional 50 $KYVE self delegation after staker has already delegated 100 $KYVE", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ACT + 
s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.STAKER_0, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.STAKER_0) + + staker, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + valaccounts := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(initialBalance - balanceAfter).To(Equal(150 * i.KYVE)) + + Expect(staker.Address).To(Equal(i.STAKER_0)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(150 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(150 * i.KYVE)) + + Expect(staker.Commission).To(Equal(types.DefaultCommission)) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + Expect(staker.Website).To(BeEmpty()) + + Expect(valaccounts).To(HaveLen(0)) + }) + + It("Try to create staker with more $KYVE than available in balance", func() { + // ACT + currentBalance := s.GetBalanceFromAddress(i.STAKER_0) + + s.RunTxStakersError(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: currentBalance + 1, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.STAKER_0) + Expect(initialBalance - balanceAfter).To(BeZero()) + + _, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(found).To(BeFalse()) + }) + + It("Create a second staker by staking 150 $KYVE", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.BOB, + Amount: 150 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.BOB) + + staker, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.BOB) + valaccounts := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.BOB) + + Expect(found).To(BeTrue()) + + Expect(initialBalance - balanceAfter).To(Equal(150 * i.KYVE)) + + Expect(staker.Address).To(Equal(i.BOB)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.BOB)).To(Equal(150 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.BOB, i.BOB)).To(Equal(150 * i.KYVE)) + + Expect(staker.Commission).To(Equal(types.DefaultCommission)) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + Expect(staker.Website).To(BeEmpty()) + + Expect(valaccounts).To(BeEmpty()) + }) + + It("Try to create a staker again", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfter := s.GetBalanceFromAddress(i.STAKER_0) + + staker, found := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + valaccounts := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(initialBalance - balanceAfter).To(Equal(100 * i.KYVE)) + + Expect(staker.Address).To(Equal(i.STAKER_0)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(staker.Commission).To(Equal(types.DefaultCommission)) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + 
Expect(staker.Website).To(BeEmpty()) + + Expect(valaccounts).To(BeEmpty()) + }) +}) diff --git a/x/stakers/keeper/msg_server_join_pool.go b/x/stakers/keeper/msg_server_join_pool.go new file mode 100644 index 00000000..bcd49175 --- /dev/null +++ b/x/stakers/keeper/msg_server_join_pool.go @@ -0,0 +1,87 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// JoinPool handles the SDK message of joining a pool. +// For joining a pool the staker needs to exist and must not +// be in that pool (even with a different valaccount) +// Second, there must be free slots available or the staker +// must have more stake than the lowest staker in that pool. +// After the staker joined the pool he is subject to slashing. +// The protocol node should be configured and running before +// submitting this transaction +func (k msgServer) JoinPool(goCtx context.Context, msg *types.MsgJoinPool) (*types.MsgJoinPoolResponse, error) { + // Unwrap context and attempt to fetch the pool. + ctx := sdk.UnwrapSDKContext(goCtx) + + pool, poolErr := k.poolKeeper.GetPoolWithError(ctx, msg.PoolId) + if poolErr != nil { + return nil, poolErr + } + if pool.Disabled { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrCanNotJoinDisabledPool.Error()) + } + + // throw error if staker was not found + staker, stakerFound := k.GetStaker(ctx, msg.Creator) + if !stakerFound { + return nil, sdkErrors.Wrapf(sdkErrors.ErrNotFound, types.ErrNoStaker.Error()) + } + + // Stakers are not allowed to use their own address, to prevent + // users from putting their staker private key on the protocol node server. + if msg.Creator == msg.Valaddress { + return nil, sdkErrors.Wrapf(sdkErrors.ErrInvalidRequest, types.ErrValaddressSameAsStaker.Error()) + } + + // Stakers are not allowed to join a pool twice. + if _, valaccountFound := k.GetValaccount(ctx, msg.PoolId, msg.Creator); valaccountFound { + return nil, sdkErrors.Wrapf(sdkErrors.ErrInvalidRequest, types.ErrAlreadyJoinedPool.Error()) + } + + // Every valaddress can only be used for one pool. It is not allowed + // to use the same valaddress for multiple pools. (to avoid account sequence errors, + // when two processes try so submit transactions simultaneously) + for _, valaccount := range k.GetValaccountsFromStaker(ctx, msg.Creator) { + if valaccount.Valaddress == msg.Valaddress { + return nil, sdkErrors.Wrapf(sdkErrors.ErrInvalidRequest, types.ValaddressAlreadyUsed.Error()) + } + } + + // It is not allowed to use the valaddress of somebody else. 
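+	// This is enforced by comparing the new valaddress against the valaccounts
+	// of all stakers that have already joined the given pool.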
+ for _, poolStaker := range k.GetAllStakerAddressesOfPool(ctx, msg.PoolId) { + valaccount, _ := k.GetValaccount(ctx, msg.PoolId, poolStaker) + + if valaccount.Valaddress == msg.Valaddress { + return nil, sdkErrors.Wrapf(sdkErrors.ErrInvalidRequest, types.ValaddressAlreadyUsed.Error()) + } + } + + // Only join if it is possible + if errFreeSlot := k.ensureFreeSlot(ctx, msg.PoolId, staker.Address); errFreeSlot != nil { + return nil, errFreeSlot + } + + k.AddValaccountToPool(ctx, msg.PoolId, msg.Creator, msg.Valaddress) + + if err := util.TransferFromAddressToAddress(k.bankKeeper, ctx, msg.Creator, msg.Valaddress, msg.Amount); err != nil { + return nil, err + } + + _ = ctx.EventManager().EmitTypedEvent(&types.EventJoinPool{ + PoolId: msg.PoolId, + Staker: msg.Creator, + Valaddress: msg.Valaddress, + Amount: msg.Amount, + }) + + return &types.MsgJoinPoolResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_join_pool_test.go b/x/stakers/keeper/msg_server_join_pool_test.go new file mode 100644 index 00000000..1691545f --- /dev/null +++ b/x/stakers/keeper/msg_server_join_pool_test.go @@ -0,0 +1,865 @@ +package keeper_test + +import ( + delegationtypes "github.com/KYVENetwork/chain/x/delegation/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_join_pool.go + +* Test if a newly created staker is participating in no pools yet +* Join the first pool as the first staker to a newly created pool +* Join a pool with zero delegation +* Join disabled pool +* Join a pool where other stakers have already joined +* Self-Delegate more KYVE after joining a pool +* Join a pool with the same valaddress as the staker address +* Try to join the same pool with the same valaddress again +* Try to join the same pool with a different valaddress +* Try to join another pool with the same valaddress again +* Try to join another pool with a valaddress that is already used by another staker +* Try to join another pool with a different valaddress +* Join a pool with a valaddress which does not exist on chain yet +* Join a pool with a valaddress which does not exist on chain yet and send 0 funds +* Join a pool with an invalid valaddress +* Join a pool and fund the valaddress with more KYVE than available in balance +* Kick out lowest staker by joining a full pool +* Fail to kick out lowest staker because not enough stake +* Kick out lowest staker with respect to stake + delegation +* Fail to kick out lowest staker because not enough stake + delegation + +*/ + +var _ = Describe("msg_server_join_pool.go", Ordered, func() { + s := i.NewCleanChain() + + initialBalanceStaker0 := uint64(0) + initialBalanceValaddress0 := uint64(0) + + initialBalanceStaker1 := uint64(0) + initialBalanceValaddress1 := uint64(0) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create pool + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + // create staker + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + initialBalanceStaker0 = s.GetBalanceFromAddress(i.STAKER_0) + initialBalanceValaddress0 = 
s.GetBalanceFromAddress(i.VALADDRESS_0) + + initialBalanceStaker1 = s.GetBalanceFromAddress(i.STAKER_1) + initialBalanceValaddress1 = s.GetBalanceFromAddress(i.VALADDRESS_1) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Test if a newly created staker is participating in no pools yet", func() { + // ASSERT + valaccounts := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccounts).To(HaveLen(0)) + }) + + It("Join the first pool as the first staker to a newly created pool", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfterStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + balanceAfterValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + Expect(initialBalanceStaker0 - balanceAfterStaker0).To(Equal(100 * i.KYVE)) + Expect(balanceAfterValaddress0 - initialBalanceValaddress0).To(Equal(100 * i.KYVE)) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_0)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + }) + + It("Join a pool with zero delegation", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 0 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0 * i.KYVE, + }) + + // ASSERT + balanceAfterStaker1 := s.GetBalanceFromAddress(i.STAKER_1) + balanceAfterValaddress1 := s.GetBalanceFromAddress(i.VALADDRESS_1) + + Expect(initialBalanceStaker1).To(Equal(balanceAfterStaker1)) + Expect(initialBalanceValaddress1).To(Equal(balanceAfterValaddress1)) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_1) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_1) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_1)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_1)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(BeZero()) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_1)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_1, 
i.STAKER_1)).To(Equal(totalStakeOfPool)) + }) + + It("Join disabled pool", func() { + // ARRANGE + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "DisabledPool", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + Disabled: true, + }) + + // ACT + _, err := s.RunTx(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + Expect(err.Error()).To(Equal("can not join disabled pool: internal logic error")) + + // ASSERT + balanceAfterStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + balanceAfterValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + Expect(initialBalanceStaker0 - balanceAfterStaker0).To(Equal(0 * i.KYVE)) + Expect(balanceAfterValaddress0 - initialBalanceValaddress0).To(Equal(0 * i.KYVE)) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(0)) + + _, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 1, i.STAKER_0) + + Expect(found).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 1) + + Expect(valaccountsOfPool).To(HaveLen(0)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 1) + + Expect(totalStakeOfPool).To(Equal(0 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(s.App().StakersKeeper.GetActiveStakers(s.Ctx())).To(HaveLen(0)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("join a pool where other stakers have already joined", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 0 * i.KYVE, + }) + + // ASSERT + balanceAfterStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + balanceAfterValaddress0 := s.GetBalanceFromAddress(i.VALADDRESS_0) + + Expect(initialBalanceStaker0 - balanceAfterStaker0).To(BeZero()) + Expect(balanceAfterValaddress0 - initialBalanceValaddress0).To(BeZero()) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_0)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(2)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(Equal(200 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + }) + + It("Self-Delegate more KYVE after joining a pool", func() { 
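+		// Self-delegating via the delegation module after joining should increase the
+		// pool's total stake without touching the existing valaccount.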
+ // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + + // ACT + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.STAKER_0, + Staker: i.STAKER_0, + Amount: 50 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_0)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool = s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(Equal(150 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + }) + + It("Try to join the same pool with the same valaddress again", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + }) + + It("join a pool with the same valaddress as the staker address", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(BeEmpty()) + }) + + It("Try to join the same pool with a different valaddress", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + }) + + It("Try to join another pool with the same valaddress again", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest2", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, 
+ }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccountsOfStaker).To(HaveLen(1)) + }) + + It("Try to join pool with a valaddress that is already used by another staker", func() { + // ARRANGE + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest2", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 1, + Valaddress: i.VALADDRESS_1, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccountsOfStaker).To(HaveLen(1)) + }) + + It("Try to join pool with a valaddress that is already used by another staker", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_1) + Expect(valaccountsOfStaker).To(BeEmpty()) + }) + + It("Try to join another pool with a different valaddress", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 100 * i.KYVE, + }) + + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest2", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_1, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccountsOfStaker).To(HaveLen(2)) + }) + + It("Join a pool with a valaddress which does not exist on chain yet", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: "kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x", + Amount: 100 * i.KYVE, + }) + + // ASSERT + balanceAfterStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + balanceAfterUnknown := s.GetBalanceFromAddress("kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x") + + Expect(initialBalanceStaker0 - balanceAfterStaker0).To(Equal(100 * i.KYVE)) + Expect(balanceAfterUnknown).To(Equal(100 * i.KYVE)) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal("kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x")) + 
Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + }) + + It("Join a pool with a valaddress which does not exist on chain yet and send 0 funds", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: "kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x", + Amount: 0 * i.KYVE, + }) + + // ASSERT + balanceAfterStaker0 := s.GetBalanceFromAddress(i.STAKER_0) + balanceAfterUnknown := s.GetBalanceFromAddress("kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x") + + Expect(initialBalanceStaker0 - balanceAfterStaker0).To(BeZero()) + Expect(balanceAfterUnknown).To(BeZero()) + + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal("kyve1dx0nvx7y9d44jvr2dr6r2p636jea3f9827rn0x")) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeFalse()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + }) + + It("Join a pool with an invalid valaddress", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: "invalid_valaddress", + Amount: 100 * i.KYVE, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(BeEmpty()) + }) + + It("Join a pool and fund the valaddress with more KYVE than available in balance", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: "invalid_valaddress", + Amount: initialBalanceStaker0 + 1, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.ALICE) + + Expect(valaccountsOfStaker).To(BeEmpty()) + }) + + It("Kick out lowest staker by joining a full pool", func() { + // Arrange + Expect(stakerstypes.MaxStakers).To(Equal(50)) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 1, + }) + + for k := 0; k < 49; k++ { + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.DUMMY[k], + Amount: 150 * i.KYVE, + }) + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.DUMMY[k], + PoolId: 0, + Valaddress: i.VALDUMMY[k], + Amount: 1, + }) + } + + // STAKER_0 is lowest staker and all 
stakers are full now. + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 150 * i.KYVE, + }) + + // Act + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 1, + }) + + // Assert + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 150) * i.KYVE)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).ToNot(ContainElement(i.STAKER_0)) + }) + + It("Fail to kick out lowest staker because not enough stake", func() { + // Arrange + Expect(stakerstypes.MaxStakers).To(Equal(50)) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 1, + }) + + for k := 0; k < 49; k++ { + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.DUMMY[k], + Amount: 150 * i.KYVE, + }) + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.DUMMY[k], + PoolId: 0, + Valaddress: i.VALDUMMY[k], + Amount: 1, + }) + } + + // STAKER_0 is lowest staker and all stakers are full now. + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + // Act + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 1, + }) + + // Assert + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).To(ContainElement(i.STAKER_0)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).ToNot(ContainElement(i.STAKER_1)) + }) + + It("Kick out lowest staker with respect to stake + delegation", func() { + // ARRANGE + Expect(stakerstypes.MaxStakers).To(Equal(50)) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 1 * i.KYVE, + }) + + for k := 0; k < 49; k++ { + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.DUMMY[k], + Amount: 150 * i.KYVE, + }) + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.DUMMY[k], + PoolId: 0, + Valaddress: i.VALDUMMY[k], + Amount: 1 * i.KYVE, + }) + } + + // Alice is lowest staker and all stakers are full now. 
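+		// ("Alice" here refers to STAKER_0, whose 100 * i.KYVE of self-delegation is
+		// the lowest stake at this point; every dummy staker has 150 * i.KYVE.)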
+ Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 150 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_0, + Amount: 150 * i.KYVE, + }) // Staker0 has now 250 delegation + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 1, + }) + + // ASSERT + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 250) * i.KYVE)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).To(ContainElement(i.STAKER_0)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).NotTo(ContainElement(i.STAKER_1)) + }) + + It("Fail to kick out lowest staker because not enough stake", func() { + // Arrange + Expect(stakerstypes.MaxStakers).To(Equal(50)) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 1, + }) + + for k := 0; k < 49; k++ { + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.DUMMY[k], + Amount: 150 * i.KYVE, + }) + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.DUMMY[k], + PoolId: 0, + Valaddress: i.VALDUMMY[k], + Amount: 1, + }) + } + + // STAKER_0 is lowest staker and all stakers are full now. + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + // Act + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 1, + }) + + // Assert + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).To(ContainElement(i.STAKER_0)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).ToNot(ContainElement(i.STAKER_1)) + }) + + It("Fail to kick out lowest staker because not enough stake + delegation", func() { + // ARRANGE + Expect(stakerstypes.MaxStakers).To(Equal(50)) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + Amount: 1 * i.KYVE, + }) + + for k := 0; k < 49; k++ { + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.DUMMY[k], + Amount: 150 * i.KYVE, + }) + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.DUMMY[k], + PoolId: 0, + Valaddress: i.VALDUMMY[k], + Amount: 1 * i.KYVE, + }) + } + + // Alice is lowest staker and all stakers are full now. 
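+		// (As above, the lowest staker at this point is STAKER_0 with 100 * i.KYVE.)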
+ Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + s.RunTxDelegatorSuccess(&delegationtypes.MsgDelegate{ + Creator: i.ALICE, + Staker: i.STAKER_1, + Amount: 50 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 0, + }) + + // ASSERT + Expect(s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0)).To(Equal((150*49 + 100) * i.KYVE)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).To(ContainElement(i.STAKER_0)) + Expect(s.App().StakersKeeper.GetAllStakerAddressesOfPool(s.Ctx(), 0)).NotTo(ContainElement(i.STAKER_1)) + }) +}) diff --git a/x/stakers/keeper/msg_server_leave_pool.go b/x/stakers/keeper/msg_server_leave_pool.go new file mode 100644 index 00000000..268ce46a --- /dev/null +++ b/x/stakers/keeper/msg_server_leave_pool.go @@ -0,0 +1,32 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// LeavePool handles the SDK message of preparing a pool leave. +// Stakers can not leave a pool immediately. Instead, they need +// to notify the system that they want to leave a pool. +// The actual leaving happens after `LeavePoolTime` is over. +func (k msgServer) LeavePool(goCtx context.Context, msg *types.MsgLeavePool) (*types.MsgLeavePoolResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + valaccount, valaccountFound := k.GetValaccount(ctx, msg.PoolId, msg.Creator) + if !valaccountFound { + return nil, sdkErrors.Wrapf(sdkErrors.ErrInvalidRequest, types.ErrAlreadyLeftPool.Error()) + } + + valaccount.IsLeaving = true + k.SetValaccount(ctx, valaccount) + + // Creates the queue entry to leave a pool. Does nothing further + if err := k.orderLeavePool(ctx, msg.Creator, msg.PoolId); err != nil { + return nil, err + } + + return &types.MsgLeavePoolResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_leave_pool_test.go b/x/stakers/keeper/msg_server_leave_pool_test.go new file mode 100644 index 00000000..6d2f755f --- /dev/null +++ b/x/stakers/keeper/msg_server_leave_pool_test.go @@ -0,0 +1,303 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + pooltypes "github.com/KYVENetwork/chain/x/pool/types" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_leave_pool.go + +* Leave a pool a staker has just joined as the first one +* Leave a pool multiple other stakers have joined previously +* Leave one of multiple pools a staker has previously joined +* Try to leave a pool again +* Leave a pool a staker has never joined + +*/ + +var _ = Describe("msg_server_leave_pool.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create pool + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + // create staker + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + + // join pool + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 0, + Valaddress: i.VALADDRESS_0, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Leave a pool a staker has just joined as the first one", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgLeavePool{ + Creator: i.STAKER_0, + PoolId: 0, + }) + s.PerformValidityChecks() + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_0)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeTrue()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + + s.PerformValidityChecks() + + // wait for leave pool + s.CommitAfterSeconds(s.App().StakersKeeper.GetLeavePoolTime(s.Ctx())) + s.CommitAfterSeconds(1) + + valaccountsOfStaker = s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(BeEmpty()) + + _, found = s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeFalse()) + + valaccountsOfPool = s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(BeEmpty()) + + totalStakeOfPool = s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + Expect(totalStakeOfPool).To(BeZero()) + }) + + It("Leave a pool multiple other stakers have joined previously", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_1, + PoolId: 0, + Valaddress: i.VALADDRESS_1, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgLeavePool{ + Creator: 
i.STAKER_0, + PoolId: 0, + }) + s.PerformValidityChecks() + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(BeZero()) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_0)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeTrue()) + + valaccountsOfPool := s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(2)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + + Expect(totalStakeOfPool).To(Equal(200 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(100 * i.KYVE)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(100 * i.KYVE)) + + s.PerformValidityChecks() + + // wait for leave pool + s.CommitAfterSeconds(s.App().StakersKeeper.GetLeavePoolTime(s.Ctx())) + s.CommitAfterSeconds(1) + + valaccountsOfStaker = s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(BeEmpty()) + + _, found = s.App().StakersKeeper.GetValaccount(s.Ctx(), 0, i.STAKER_0) + + Expect(found).To(BeFalse()) + + valaccountsOfPool = s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 0) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool = s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 0) + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + }) + + It("Try to leave a pool again", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgLeavePool{ + Creator: i.STAKER_0, + PoolId: 0, + }) + s.PerformValidityChecks() + + // ACT + s.RunTxStakersError(&stakerstypes.MsgLeavePool{ + Creator: i.STAKER_0, + PoolId: 0, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccountsOfStaker).To(HaveLen(1)) + + // wait for leave pool + s.CommitAfterSeconds(s.App().StakersKeeper.GetLeavePoolTime(s.Ctx())) + s.CommitAfterSeconds(1) + + valaccountsOfStaker = s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + Expect(valaccountsOfStaker).To(BeEmpty()) + }) + + It("Leave one of multiple pools a staker has previously joined", func() { + // ARRANGE + s.App().PoolKeeper.AppendPool(s.Ctx(), pooltypes.Pool{ + Name: "Moontest", + Protocol: &pooltypes.Protocol{ + Version: "0.0.0", + Binaries: "{}", + LastUpgrade: uint64(s.Ctx().BlockTime().Unix()), + }, + UpgradePlan: &pooltypes.UpgradePlan{}, + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgJoinPool{ + Creator: i.STAKER_0, + PoolId: 1, + Valaddress: i.VALADDRESS_1, + }) + s.PerformValidityChecks() + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgLeavePool{ + Creator: i.STAKER_0, + PoolId: 1, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(2)) + + valaccount, found := s.App().StakersKeeper.GetValaccount(s.Ctx(), 1, i.STAKER_0) + + Expect(found).To(BeTrue()) + + Expect(valaccount.Staker).To(Equal(i.STAKER_0)) + Expect(valaccount.PoolId).To(Equal(uint64(1))) + Expect(valaccount.Valaddress).To(Equal(i.VALADDRESS_1)) + Expect(valaccount.Points).To(BeZero()) + Expect(valaccount.IsLeaving).To(BeTrue()) + + valaccountsOfPool := 
s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 1) + + Expect(valaccountsOfPool).To(HaveLen(1)) + + totalStakeOfPool := s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 1) + Expect(totalStakeOfPool).To(Equal(100 * i.KYVE)) + + Expect(s.App().DelegationKeeper.GetDelegationAmount(s.Ctx(), i.STAKER_0)).To(Equal(totalStakeOfPool)) + Expect(s.App().DelegationKeeper.GetDelegationAmountOfDelegator(s.Ctx(), i.STAKER_0, i.STAKER_0)).To(Equal(totalStakeOfPool)) + + // wait for leave pool + s.CommitAfterSeconds(s.App().StakersKeeper.GetLeavePoolTime(s.Ctx())) + s.CommitAfterSeconds(1) + + valaccountsOfStaker = s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_0) + + Expect(valaccountsOfStaker).To(HaveLen(1)) + + _, found = s.App().StakersKeeper.GetValaccount(s.Ctx(), 1, i.STAKER_0) + + Expect(found).To(BeFalse()) + + valaccountsOfPool = s.App().StakersKeeper.GetAllValaccountsOfPool(s.Ctx(), 1) + + Expect(valaccountsOfPool).To(BeEmpty()) + + totalStakeOfPool = s.App().DelegationKeeper.GetDelegationOfPool(s.Ctx(), 1) + Expect(totalStakeOfPool).To(BeZero()) + }) + + It("Leave a pool a staker has never joined", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersError(&stakerstypes.MsgLeavePool{ + Creator: i.STAKER_1, + PoolId: 0, + }) + + // ASSERT + valaccountsOfStaker := s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_1) + Expect(valaccountsOfStaker).To(BeEmpty()) + + // wait for leave pool + s.CommitAfterSeconds(s.App().StakersKeeper.GetLeavePoolTime(s.Ctx())) + s.CommitAfterSeconds(1) + + valaccountsOfStaker = s.App().StakersKeeper.GetValaccountsFromStaker(s.Ctx(), i.STAKER_1) + Expect(valaccountsOfStaker).To(BeEmpty()) + }) +}) diff --git a/x/stakers/keeper/msg_server_update_commission.go b/x/stakers/keeper/msg_server_update_commission.go new file mode 100644 index 00000000..6d10d536 --- /dev/null +++ b/x/stakers/keeper/msg_server_update_commission.go @@ -0,0 +1,37 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// UpdateCommission creates a queue entry to update the staker commission. +// After the `CommissionChangeTime` is over the new commission will be applied. +// If an update is currently in the queue it will get removed from the queue +// and the user needs to wait again for the full time to pass. +func (k msgServer) UpdateCommission(goCtx context.Context, msg *types.MsgUpdateCommission) (*types.MsgUpdateCommissionResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Check if the sender is a protocol node (aka has staked into this pool). + if !k.DoesStakerExist(ctx, msg.Creator) { + return nil, sdkErrors.Wrap(sdkErrors.ErrUnauthorized, types.ErrNoStaker.Error()) + } + + // Validate commission. 
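+	// The commission is passed as a decimal string; unparsable values and values
+	// outside the range [0, 1] are rejected before the change is queued.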
+ commission, err := sdk.NewDecFromStr(msg.Commission) + if err != nil { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidCommission.Error(), msg.Commission) + } + + if commission.LT(sdk.NewDec(int64(0))) || commission.GT(sdk.NewDec(int64(1))) { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidCommission.Error(), msg.Commission) + } + + // Insert commission change into queue + k.orderNewCommissionChange(ctx, msg.Creator, msg.Commission) + + return &types.MsgUpdateCommissionResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_update_commission_test.go b/x/stakers/keeper/msg_server_update_commission_test.go new file mode 100644 index 00000000..852a20a5 --- /dev/null +++ b/x/stakers/keeper/msg_server_update_commission_test.go @@ -0,0 +1,250 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_update_commission.go + +* Get the default commission from a newly created staker +* Update commission to 50% from previously default commission +* Update commission to 0% from previously default commission +* Update commission to 100% from previously default commission +* Update commission with an invalid number from previously default commission +* Update commission with a negative number from previously default commission +* Update commission with a too high number from previously default commission +* Update commission multiple times during the commission change time +* Update commission multiple times during the commission change time with the same value +* Update commission with multiple stakers + +*/ + +var _ = Describe("msg_server_update_commission.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create staker + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Get the default commission from a newly created staker", func() { + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + }) + + It("Update commission to 50% from previously default commission", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.5", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal("0.5")) + }) + + It("Update commission to 0% from previously default commission", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + 
Expect(staker.Commission).To(Equal("0")) + }) + + It("Update commission to 100% from previously default commission", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "1", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal("1")) + }) + + It("Update commission with an invalid number from previously default commission", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "teset", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + }) + + It("Update commission with a negative number from previously default commission", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "-0.5", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + }) + + It("Update commission with a too high number from previously default commission", func() { + // ACT + s.RunTxStakersError(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "2", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + }) + + It("Update commission multiple times during the commission change time", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.5", + }) + s.PerformValidityChecks() + + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.2", + }) + s.PerformValidityChecks() + + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.3", + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal("0.3")) + }) + + It("Update commission multiple times during the commission change time with the same value", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.5", + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.2", + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: stakerstypes.DefaultCommission, + }) + s.PerformValidityChecks() + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker, _ = 
s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker.Commission).To(Equal(stakerstypes.DefaultCommission)) + }) + + It("Update commission with multiple stakers", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_1, + Amount: 100 * i.KYVE, + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_0, + Commission: "0.5", + }) + + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateCommission{ + Creator: i.STAKER_1, + Commission: "0.5", + }) + + s.PerformValidityChecks() + + // ASSERT + staker0, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker0.Commission).To(Equal(stakerstypes.DefaultCommission)) + + staker1, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_1) + Expect(staker1.Commission).To(Equal(stakerstypes.DefaultCommission)) + + // wait for update + s.CommitAfterSeconds(s.App().StakersKeeper.GetCommissionChangeTime(s.Ctx())) + s.CommitAfterSeconds(1) + + staker0, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + Expect(staker0.Commission).To(Equal("0.5")) + + staker1, _ = s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_1) + Expect(staker1.Commission).To(Equal("0.5")) + }) +}) diff --git a/x/stakers/keeper/msg_server_update_metadata.go b/x/stakers/keeper/msg_server_update_metadata.go new file mode 100644 index 00000000..091fea93 --- /dev/null +++ b/x/stakers/keeper/msg_server_update_metadata.go @@ -0,0 +1,32 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/stakers/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// UpdateMetadata allows a staker to change basic metadata like moniker, address, logo, etc. +// The update is performed immediately. +func (k msgServer) UpdateMetadata(goCtx context.Context, msg *types.MsgUpdateMetadata) (*types.MsgUpdateMetadataResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + // Check if the sender is a protocol node (aka has staked into this pool). + if !k.DoesStakerExist(ctx, msg.Creator) { + return nil, sdkErrors.Wrap(sdkErrors.ErrUnauthorized, types.ErrNoStaker.Error()) + } + + // Apply new metadata to staker + k.UpdateStakerMetadata(ctx, msg.Creator, msg.Moniker, msg.Website, msg.Logo) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventUpdateMetadata{ + Staker: msg.Creator, + Moniker: msg.Moniker, + Website: msg.Website, + Logo: msg.Logo, + }) + + return &types.MsgUpdateMetadataResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_update_metadata_test.go b/x/stakers/keeper/msg_server_update_metadata_test.go new file mode 100644 index 00000000..fceb582f --- /dev/null +++ b/x/stakers/keeper/msg_server_update_metadata_test.go @@ -0,0 +1,131 @@ +package keeper_test + +import ( + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + i "github.com/KYVENetwork/chain/testutil/integration" + stakerstypes "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_update_metadata.go + +* Get the default metadata of a newly created staker +* Update metadata with real values of a newly created staker +* Reset metadata to empty values +* Exceed max length + +*/ + +var _ = Describe("msg_server_update_metadata.go", Ordered, func() { + s := i.NewCleanChain() + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChain() + + // create staker + s.RunTxStakersSuccess(&stakerstypes.MsgCreateStaker{ + Creator: i.STAKER_0, + Amount: 100 * i.KYVE, + }) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Get the default metadata of a newly created staker", func() { + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Website).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + }) + + It("Update metadata with real values of a newly created staker", func() { + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateMetadata{ + Creator: i.STAKER_0, + Moniker: "KYVE Node Runner", + Website: "https://kyve.network", + Logo: "https://arweave.net/Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + }) + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + + Expect(staker.Moniker).To(Equal("KYVE Node Runner")) + Expect(staker.Website).To(Equal("https://kyve.network")) + Expect(staker.Logo).To(Equal("https://arweave.net/Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU")) + }) + + It("Reset metadata to empty values", func() { + // ARRANGE + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateMetadata{ + Creator: i.STAKER_0, + Moniker: "KYVE Node Runner", + Website: "https://kyve.network", + Logo: "https://arweave.net/Tewyv2P5VEG8EJ6AUQORdqNTectY9hlOrWPK8wwo-aU", + }) + + // ACT + s.RunTxStakersSuccess(&stakerstypes.MsgUpdateMetadata{ + Creator: i.STAKER_0, + Moniker: "", + Website: "", + Logo: "", + }) + + // ASSERT + staker, _ := s.App().StakersKeeper.GetStaker(s.Ctx(), i.STAKER_0) + + Expect(staker.Moniker).To(BeEmpty()) + Expect(staker.Website).To(BeEmpty()) + Expect(staker.Logo).To(BeEmpty()) + }) + + It("One below max length", func() { + // ARRANGE + var stringStillAllowed string + for i := 0; i < 255; i++ { + stringStillAllowed += "." + } + + // ACT + msg := stakerstypes.MsgUpdateMetadata{ + Creator: i.STAKER_0, + Moniker: stringStillAllowed, + Website: stringStillAllowed, + Logo: stringStillAllowed, + } + err := msg.ValidateBasic() + + // ASSERT + Expect(err).To(BeNil()) + }) + + // stringTooLong := stringStillAllowed + "." + It("Exceed max length", func() { + // ARRANGE + var stringTooLong string + for i := 0; i < 256; i++ { + stringTooLong += "." 
+ } + + // ACT + msg := stakerstypes.MsgUpdateMetadata{ + Creator: i.STAKER_0, + Moniker: stringTooLong, + Website: stringTooLong, + Logo: stringTooLong, + } + err := msg.ValidateBasic() + + // ASSERT + Expect(err).ToNot(BeNil()) + }) +}) diff --git a/x/stakers/keeper/msg_server_update_params.go b/x/stakers/keeper/msg_server_update_params.go new file mode 100644 index 00000000..0268d464 --- /dev/null +++ b/x/stakers/keeper/msg_server_update_params.go @@ -0,0 +1,30 @@ +package keeper + +import ( + "context" + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + + // Gov + govTypes "github.com/cosmos/cosmos-sdk/x/gov/types" + // Stakers + "github.com/KYVENetwork/chain/x/stakers/types" +) + +func (k msgServer) UpdateParams(goCtx context.Context, req *types.MsgUpdateParams) (*types.MsgUpdateParamsResponse, error) { + if k.authority != req.Authority { + return nil, errors.Wrapf(govTypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, req.Authority) + } + + ctx := sdk.UnwrapSDKContext(goCtx) + params := k.GetParams(ctx) + + payload := params + _ = json.Unmarshal([]byte(req.Payload), &payload) + k.SetParams(ctx, payload) + + return &types.MsgUpdateParamsResponse{}, nil +} diff --git a/x/stakers/keeper/msg_server_update_params_test.go b/x/stakers/keeper/msg_server_update_params_test.go new file mode 100644 index 00000000..c4ee172f --- /dev/null +++ b/x/stakers/keeper/msg_server_update_params_test.go @@ -0,0 +1,333 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + sdk "github.com/cosmos/cosmos-sdk/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + // Gov + govV1Types "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + // Stakers + "github.com/KYVENetwork/chain/x/stakers/types" +) + +/* + +TEST CASES - msg_server_update_params.go + +* Check default params +* Invalid authority (transaction) +* Invalid authority (proposal) +* Update every param at once +* Update no param +* Update with invalid formatted payload + +* Update unbonding staking time +* Update unbonding staking time with invalid value + +* Update commission change time +* Update commission change time with invalid value + +* Update leave pool time +* Update leave pool time with invalid value + +*/ + +var _ = Describe("msg_server_update_params.go", Ordered, func() { + s := i.NewCleanChain() + + gov := s.App().GovKeeper.GetGovernanceAccount(s.Ctx()).GetAddress().String() + + minDeposit := s.App().GovKeeper.GetDepositParams(s.Ctx()).MinDeposit + votingPeriod := s.App().GovKeeper.GetVotingParams(s.Ctx()).VotingPeriod + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter := sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + + BeforeEach(func() { + s = i.NewCleanChain() + + delegations := s.App().StakingKeeper.GetAllDelegations(s.Ctx()) + voter = sdk.MustAccAddressFromBech32(delegations[0].DelegatorAddress) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Check default params", func() { + // ASSERT + params := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(params.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(params.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) + + It("Invalid authority (transaction)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + // ACT + _, err := s.RunTx(msg) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Invalid authority 
(proposal)", func() { + // ARRANGE + msg := &types.MsgUpdateParams{ + Authority: i.DUMMY[0], + Payload: "{}", + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, err := s.RunTx(proposal) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("Update every param at once", func() { + // ARRANGE + payload := `{ + "unbonding_staking_time": 5, + "commission_change_time": 5, + "leave_pool_time": 5 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(uint64(5))) + Expect(updatedParams.LeavePoolTime).To(Equal(uint64(5))) + }) + + It("Update no params", func() { + // ARRANGE + payload := `{}` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(updatedParams.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) + + It("Update with invalid formatted payload", func() { + // ARRANGE + payload := `{ + "vote_slash": "0.5", + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(updatedParams.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) + + It("Update commission change time", func() { + // ARRANGE + payload := `{ + "commission_change_time": 5 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(uint64(5))) + Expect(updatedParams.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) + + It("Update commission change time with invalid value", func() { + // ARRANGE + payload := `{ + 
"commission_change_time": "5" + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(updatedParams.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) + + It("Update leave pool time", func() { + // ARRANGE + payload := `{ + "leave_pool_time": 5 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + vote := govV1Types.NewMsgVote( + voter, 1, govV1Types.VoteOption_VOTE_OPTION_YES, "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + _, voteErr := s.RunTx(vote) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).NotTo(HaveOccurred()) + Expect(voteErr).NotTo(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(updatedParams.LeavePoolTime).To(Equal(uint64(5))) + }) + + It("Update leave pool time with invalid value", func() { + // ARRANGE + payload := `{ + "leave_pool_time": -5 + }` + + msg := &types.MsgUpdateParams{ + Authority: gov, + Payload: payload, + } + + proposal, _ := govV1Types.NewMsgSubmitProposal( + []sdk.Msg{msg}, minDeposit, i.DUMMY[0], "", + ) + + // ACT + _, submitErr := s.RunTx(proposal) + + s.CommitAfter(*votingPeriod) + s.Commit() + + // ASSERT + updatedParams := s.App().StakersKeeper.GetParams(s.Ctx()) + + Expect(submitErr).To(HaveOccurred()) + + Expect(updatedParams.CommissionChangeTime).To(Equal(types.DefaultCommissionChangeTime)) + Expect(updatedParams.LeavePoolTime).To(Equal(types.DefaultLeavePoolTime)) + }) +}) diff --git a/x/stakers/module.go b/x/stakers/module.go new file mode 100644 index 00000000..655bc218 --- /dev/null +++ b/x/stakers/module.go @@ -0,0 +1,161 @@ +package stakers + +import ( + "context" + "encoding/json" + "fmt" + // this line is used by starport scaffolding # 1 + + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/KYVENetwork/chain/x/stakers/client/cli" + "github.com/KYVENetwork/chain/x/stakers/keeper" + "github.com/KYVENetwork/chain/x/stakers/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. 
+type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd(types.StoreKey) +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + keeper keeper.Keeper + accountKeeper types.AccountKeeper + bankKeeper types.BankKeeper +} + +func NewAppModule( + cdc codec.Codec, + keeper keeper.Keeper, + accountKeeper types.AccountKeeper, + bankKeeper types.BankKeeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + keeper: keeper, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. 
To avoid wrong/empty versions, the initial version should be set to 1 +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock contains the logic that is automatically triggered at the beginning of each block +func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} + +// EndBlock contains the logic that is automatically triggered at the end of each block +func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + am.keeper.ProcessCommissionChangeQueue(ctx) + am.keeper.ProcessLeavePoolQueue(ctx) + return []abci.ValidatorUpdate{} +} diff --git a/x/stakers/spec/01_concepts.md b/x/stakers/spec/01_concepts.md new file mode 100644 index 00000000..e656a75c --- /dev/null +++ b/x/stakers/spec/01_concepts.md @@ -0,0 +1,39 @@ + + +# Concepts + +The stakers module manages all properties of the stakers except their stake +(this is done by the delegation module). It handles staker creation, metadata +(like moniker, logo, website and commission) and the joining and leaving of +pools using valaccounts. + +## Code Structure + +This module adheres to our global coding structure, defined [here](../../../CodeStructure.md). + +## Staker +Every address can create one staker (itself). A staker has the following +metadata which can be changed at any time. +- Moniker +- Logo +- Website + +Additionally, a staker can specify a commission. However, this takes +`CommissionChangeTime` seconds of time before the change is applied. + +## Valaccounts +To join a pool the user creates a valaccount for this pool. +The existence of a valaccount (for a pool) means that the staker +is a member of the given pool and needs to comply with the protocol +in order to not get slashed. +A valaccount consists of the poolId a valaddress and a points counter. +The valaddress is the address of the protocol node which is allowed +to vote in favor of the staker. For certain types of misbehavior +(e.g. being offline) a staker collects points. These are also +stored in the valaccount. + +If a staker wants to leave a pool, a queue entry must be created. After +`LeavePoolTime` seconds of time the actual leaving is performed and the +staker can stop the protocol node for the given pool. \ No newline at end of file diff --git a/x/stakers/spec/02_state.md b/x/stakers/spec/02_state.md new file mode 100644 index 00000000..ceb03135 --- /dev/null +++ b/x/stakers/spec/02_state.md @@ -0,0 +1,140 @@ + + +# State + +## Staker +Every address can create one single staker. Once the staker is created +people can delegate to it and the staker can start joining pools +(if the stake is high enough). + +- Staker: `0x01 | StakerAddr -> ProtocolBuffer(staker)` + +```go +type Staker struct { + Address string + // Needs to be a valid decimal representation + Commission uint64 + Moniker uint64 + Website uint64 + Logo uint64 +} +``` + +## Valaccount +The Valaccount represents the membership of the staker in a given pool. +It contains the address of the protocol node which is allowed to vote +in favor of the staker and stores the poolId as well as a counter for +penalty-points. + +- Valaccount: `0x02 | 0x00 | PoolId | StakerAddr -> ProtocolBuffer(valaccount)` + +One additional index is maintained to query for all valaccounts of a staker. +For this index only the key is used as StakerAddr and PoolId contain all +information to fetch the valaccount using the main key. 
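+
+Under the assumption of a big-endian uint64 encoding for the pool id and an
+opaque string address (the prefixes and helper names below are illustrative,
+not the module's actual key helpers), a minimal sketch of how such a key-only
+index can be assembled and scanned looks as follows; the exact key layout is
+listed right below.
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// Illustrative store prefixes mirroring the layout described in this section.
+var (
+	valaccountPrefix = []byte{0x02, 0x00} // 0x02 | 0x00 | PoolId | StakerAddr -> Valaccount
+	index2Prefix     = []byte{0x02, 0x01} // 0x02 | 0x01 | StakerAddr | PoolId -> (empty)
+)
+
+// index2Key assembles StakerAddr | PoolId under the secondary prefix. Scanning
+// the KV-store over 0x02 | 0x01 | StakerAddr then yields one key per pool the
+// staker has a valaccount in; no value is stored because the key alone is
+// enough to rebuild the primary key 0x02 | 0x00 | PoolId | StakerAddr.
+func index2Key(staker string, poolId uint64) []byte {
+	key := append([]byte{}, index2Prefix...)
+	key = append(key, []byte(staker)...)
+	id := make([]byte, 8)
+	binary.BigEndian.PutUint64(id, poolId)
+	return append(key, id...)
+}
+
+func main() {
+	fmt.Printf("%x\n", index2Key("kyve1examplestaker", 7))
+}
+```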
+ +- ValaccountIndex2: `0x02 | 0x01 | StakerAddr | PoolId -> (empty)` + +```go +type Valaccount struct { + // PoolId defines the pool in which the address + // is allowed to vote in. + PoolId uint64 + // Staker is the address the valaccount is voting for. + Staker string + // valaddress is the account stored on the protocol + // node which votes for the staker in the given pool + Valaddress string + // When a node is inactive (does not vote at all) + // a point is added. After a certain amount of points + // is reached, the node gets kicked out. + Points uint64 + // isLeaving indicates if a staker is leaving the given pool. + IsLeaving bool +} +``` + +## Queue + +The staker module contains two queues managing commission changes and +the leaving of pools. + +### QueueState +For the queue the module needs to keep track of the head (HighIndex) and +tail (LowIndex) of the queue. New entries are appended to the +head. The EndBlocker checks the tail if entries are due and processes them. +There are two queues distinguished by the queue identifier. + +- QueueState: `0x1E | 0x02 -> ProtocolBuffer(commissionQueueState)` +- QueueState: `0x1E | 0x03 -> ProtocolBuffer(leaveQueueState)` + +```go +type QueueState struct { + LowIndex uint64 + HighIndex uint64 +} +``` + +### CommissionChangeQueueEntry +Every time a user starts a commission change, an entry is created +and appended to the head of the queue. I.e. the current HighIndex is +incremented and assigned to the entry. +The order of the queue is automatically provided by the KV-Store. + +- CommissionChangeQueueEntry: `0x04 | 0x00 | Index -> ProtocolBuffer(commissionChangeQueueEntry)` + +A second index is provided so that users can query their own pending entries +without iterating the entire queue. The key is unique as there can only be +one commission change entry per staker. If a staker performs another +commission change the current pending entry is overwritten. + +- UndelegationQueueEntryIndex2: `0x04 | 0x01 | StakerAddr -> ProtocolBuffer(commissionChangeQueueEntry)` + + +```go +type CommissionChangeEntry struct { + // Index is needed for the queue-algorithm which + // processes the commission changes + Index uint64 + // Staker is the address of the affected staker + Staker string + // Commission is the new commission which will + // be applied after the waiting time is over. + Commission String + // CreationDate is the UNIX-timestamp in seconds + // when the entry was created. + CreationDate uint64 +} +``` + + +### LeavePoolQueueEntry +Every time a user initiates a pool leave, an entry is created +and appended to the head of the queue, i.e. the current HighIndex is +incremented and assigned to the entry. +The order of the queue is automatically provided by the KV-Store. + +- LeavePoolEntry: `0x05 | 0x00 | Index -> ProtocolBuffer(leavePoolEntry)` + +A second index is provided so that users can query their own pending entries +without iterating the entire queue. + +- LeavePoolEntryIndex2: `0x05 | 0x01 | StakerAddr | PoolId -> ProtocolBuffer(leavePoolEntry)` + + +```go +type CommissionChangeEntry struct { + // Index is needed for the queue-algorithm which + // processes the commission changes + Index uint64 + // Staker is the address of the affected staker + Staker string + // Commission is the new commission which will + // be applied after the waiting time is over. + Commission String + // CreationDate is the UNIX-timestamp in seconds + // when the entry was created. 
+ CreationDate uint64 +} +``` \ No newline at end of file diff --git a/x/stakers/spec/03_messages.md b/x/stakers/spec/03_messages.md new file mode 100644 index 00000000..56921f8f --- /dev/null +++ b/x/stakers/spec/03_messages.md @@ -0,0 +1,44 @@ + + +# Messages + +## `CreateStaker` + +Using this message, a user can create a staker. This can only be executed once +for each address. The sender can specify an amount which in turn is a direct +self-delegation to the given staker. + +## `MsgUpdateMetadata` + +This message changes Moniker, Website and Logo of the staker. The message fails +if the user does not have created a staker yet. + +## `MsgUpdateCommission` + +This message starts a commission change process by creating a new entry in the +commission change queue. Nothing else happens after that. The upcoming +commission change is shown in the staker query. So that delegators can see that +the given staker is about to change its commission. + +After the `CommissionChangeTime` has passed the new commission is applied. + +## `MsgJoinPool` + +This message allows a staker to join a pool. For joining a pool the staker must +provide the poolId and an address which is operated by the protocol node. This +address is allowed to vote in favor of the staker. If this address misbehaves, +the staker will get slashed. The message also takes an amount as an argument +which is transferred to the valaddress. The valaddress needs a small balance to +pay for fees. + +## `MsgLeavePoolResponse` + +This message starts a leave pool process by creating a new entry in the leave +pool queue. Nothing else happens after that. The upcoming pool leave is shown in +the staker query. So that delegators can see that the given staker is about to +leave the given pool. + +After the `LeavePoolTime` has passed the valaccount is deleted and the staker +can shut down the protocol node. \ No newline at end of file diff --git a/x/stakers/spec/04_end_block.md b/x/stakers/spec/04_end_block.md new file mode 100644 index 00000000..8ae0e2e0 --- /dev/null +++ b/x/stakers/spec/04_end_block.md @@ -0,0 +1,9 @@ + + +# EndBlock + +The `x/stakers` module end-block hook handles the commission-change and +leave-pool queue. After the `CommissionChangeTime` resp `LeavePoolTime` has +passed, the queue entry is executed. diff --git a/x/stakers/spec/05_events.md b/x/stakers/spec/05_events.md new file mode 100644 index 00000000..9fa8007b --- /dev/null +++ b/x/stakers/spec/05_events.md @@ -0,0 +1,108 @@ + + +# Events + +The `x/stakers` module contains the following events: + +## EventCreateStaker + +EventBundleProposed indicates that a new staker was created. + +```protobuf +message EventCreateStaker { + // staker is the account address of the protocol node. + string staker = 1; + // amount for inital self-delegation + uint64 amount = 2; +} +``` + +It gets thrown from the following actions: + +- MsgCreateStaker + +## EventUpdateMetadata + +EventUpdateMetadata is an event emitted when a protocol node updates their +metadata. + +```protobuf +message EventUpdateMetadata { + // staker is the account address of the protocol node. + string staker = 1; + // moniker ... + string moniker = 2; + // website ... + string website = 3; + // logo ... + string logo = 4; +} +``` + +It gets thrown from the following actions: + +- MsgUpdateMetadata + +## EventUpdateCommission + +EventUpdateCommission indicates that a staker has changes its commission. + +```protobuf +message EventUpdateCommission { + // staker is the account address of the protocol node. 
+ string staker = 1; + // commission ... + string commission = 2; +} +``` + +It gets thrown from the following actions: + +- EndBlock + +## EventJoinPool + +EventClaimUploaderRole indicates that a staker has joined a pool. + +```protobuf +message EventJoinPool { + // pool_id is the pool the staker joined + uint64 pool_id = 1; + // staker is the address of the staker + string staker = 2; + // valaddress is the address of the protocol node which + // votes in favor of the staker + string valaddress = 3; + // amount is the amount of funds transferred to the valaddress + uint64 amount = 4; +} +``` + +It gets thrown from the following actions: + +- MsgJoinPool + +## EventLeavePool + +EventLeavePool indicates that a staker has left a pool. +Either by leaving or by getting kicked out for the following reasons: + +- misbehaviour (usually together with a slash) +- all pool slots are taken and a node with more stake joined. + +```protobuf +message EventLeavePool { + // pool_id ... + uint64 pool_id = 1; + // staker ... + string staker = 2; +} +``` + +It gets thrown from the following actions: + +- EndBlock +- bundles/MsgSubmitBundleProposal +- MsgJoinPool \ No newline at end of file diff --git a/x/stakers/spec/06_params.md b/x/stakers/spec/06_params.md new file mode 100644 index 00000000..034950c8 --- /dev/null +++ b/x/stakers/spec/06_params.md @@ -0,0 +1,12 @@ + + +# Parameters + +The `x/stakers` module relies on the following parameters: + +| Key | Type | Default Value | +|------------------------|-----------------|---------------| +| `CommissionChangeTime` | uint64 (time s) | 432000 | +| `LeavePoolTime` | uint64 (time s) | 432000 | diff --git a/x/stakers/spec/07_exported.md b/x/stakers/spec/07_exported.md new file mode 100644 index 00000000..836664cb --- /dev/null +++ b/x/stakers/spec/07_exported.md @@ -0,0 +1,65 @@ + + +# Exported + +The `x/stakers` module exports the following functions, which can be used +outside the module. + +```go +type StakersKeeper interface { + + // LeavePool removes a staker from a pool and emits the corresponding event. + // The staker is no longer able to participate in the given pool. + // All points the staker had in that pool are deleted. + LeavePool(ctx sdk.Context, staker string, poolId uint64) + + // GetAllStakerAddressesOfPool returns a list of all stakers + // which have currently a valaccount registered for the given pool + // and are therefore allowed to participate in that pool. + GetAllStakerAddressesOfPool(ctx sdk.Context, poolId uint64) (stakers []string) + + // GetCommission returns the commission of a staker as a parsed sdk.Dec + GetCommission(ctx sdk.Context, stakerAddress string) sdk.Dec + + // AssertValaccountAuthorized checks if the given `valaddress` is allowed to vote in pool + // with id `poolId` to vote in favor of `stakerAddress`. + // If the valaddress is not authorized the appropriate error is returned. + // Otherwise, it returns `nil` + AssertValaccountAuthorized(ctx sdk.Context, poolId uint64, stakerAddress string, valaddress string) error + + // GetActiveStakers returns all staker-addresses that are + // currently participating in at least one pool. + GetActiveStakers(ctx sdk.Context) []string + + // TotalBondedTokens returns all tokens which are currently bonded by the protocol + // I.e. 
the sum of all delegation of all stakers that are currently participating + // in at least one pool + TotalBondedTokens(ctx sdk.Context) math.Int + + // GetActiveValidators returns all protocol-node information which + // are needed by the governance to calculate the voting powers. + // The interface needs to correspond to github.com/cosmos/cosmos-sdk/x/gov/types/v1.ValidatorGovInfo + // But as there is no direct dependency in the cosmos-sdk-fork this value is passed as an interface{} + GetActiveValidators(ctx sdk.Context) (validators []interface{}) + + // GetDelegations returns the address and the delegation amount of all active protocol-stakers the + // delegator as delegated to. This is used to calculate the vote weight each delegator has. + GetDelegations(ctx sdk.Context, delegator string) (validators []string, amounts []sdk.Dec) + + // IncrementPoints increments to Points for a staker in a given pool. + // Returns the amount of the current points (including the current incrementation) + IncrementPoints(ctx sdk.Context, poolId uint64, stakerAddress string) uint64 + + // ResetPoints sets the point count for the staker in the given pool back to zero. + // Returns the amount of points the staker had before the reset. + ResetPoints(ctx sdk.Context, poolId uint64, stakerAddress string) (previousPoints uint64) + + // DoesValaccountExist only checks if the key is present in the KV-Store + // without loading and unmarshalling to full entry + DoesValaccountExist(ctx sdk.Context, poolId uint64, stakerAddress string) bool + + DoesStakerExist(ctx sdk.Context, staker string) bool +} +``` diff --git a/x/stakers/types/codec.go b/x/stakers/types/codec.go new file mode 100644 index 00000000..cc23de45 --- /dev/null +++ b/x/stakers/types/codec.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgCreateStaker{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateCommission{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateMetadata{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgJoinPool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgLeavePool{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgUpdateParams{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/stakers/types/errors.go b/x/stakers/types/errors.go new file mode 100644 index 00000000..32fa3cd2 --- /dev/null +++ b/x/stakers/types/errors.go @@ -0,0 +1,23 @@ +package types + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// staking errors +var ( + ErrStakeTooLow = sdkerrors.Register(ModuleName, 1103, "minimum staking amount of %vkyve not reached") + ErrUnstakeTooHigh = sdkerrors.Register(ModuleName, 1104, "maximum unstaking amount of %vkyve surpassed") + ErrNoStaker = sdkerrors.Register(ModuleName, 1105, "sender is no staker") + ErrAlreadyJoinedPool = sdkerrors.Register(ModuleName, 1106, "already joined pool") + ErrAlreadyLeftPool = sdkerrors.Register(ModuleName, 1107, "already left pool") + ValaddressAlreadyUsed = sdkerrors.Register(ModuleName, 1108, "valaddress already used") + ErrStringMaxLengthExceeded = sdkerrors.Register(ModuleName, 1109, "String length exceeded: %d vs %d") + 
ErrStakerAlreadyCreated = sdkerrors.Register(ModuleName, 1110, "Staker already created") + ErrValaddressSameAsStaker = sdkerrors.Register(ModuleName, 1111, "Valaddress has same address as Valaddress") + ErrCanNotJoinDisabledPool = sdkerrors.Register(ModuleName, 1112, "can not join disabled pool") + + ErrInvalidCommission = sdkerrors.Register(ModuleName, 1116, "invalid commission %v") + ErrPoolLeaveAlreadyInProgress = sdkerrors.Register(ModuleName, 1117, "Pool leave is already in progress") + ErrValaccountUnauthorized = sdkerrors.Register(ModuleName, 1118, "valaccount unauthorized") +) diff --git a/x/stakers/types/events.pb.go b/x/stakers/types/events.pb.go new file mode 100644 index 00000000..23c49708 --- /dev/null +++ b/x/stakers/types/events.pb.go @@ -0,0 +1,1426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventCreateStaker is an event emitted when a protocol node stakes in a pool. +// emitted_by: MsgCreateStaker +type EventCreateStaker struct { + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // amount ... + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventCreateStaker) Reset() { *m = EventCreateStaker{} } +func (m *EventCreateStaker) String() string { return proto.CompactTextString(m) } +func (*EventCreateStaker) ProtoMessage() {} +func (*EventCreateStaker) Descriptor() ([]byte, []int) { + return fileDescriptor_7a1b3dc9634155a0, []int{0} +} +func (m *EventCreateStaker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventCreateStaker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventCreateStaker.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventCreateStaker) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventCreateStaker.Merge(m, src) +} +func (m *EventCreateStaker) XXX_Size() int { + return m.Size() +} +func (m *EventCreateStaker) XXX_DiscardUnknown() { + xxx_messageInfo_EventCreateStaker.DiscardUnknown(m) +} + +var xxx_messageInfo_EventCreateStaker proto.InternalMessageInfo + +func (m *EventCreateStaker) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventCreateStaker) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventUpdateMetadata is an event emitted when a protocol node updates their metadata. +// emitted_by: MsgUpdateMetadata +type EventUpdateMetadata struct { + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // moniker ... 
+ Moniker string `protobuf:"bytes,2,opt,name=moniker,proto3" json:"moniker,omitempty"` + // website ... + Website string `protobuf:"bytes,3,opt,name=website,proto3" json:"website,omitempty"` + // logo ... + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` +} + +func (m *EventUpdateMetadata) Reset() { *m = EventUpdateMetadata{} } +func (m *EventUpdateMetadata) String() string { return proto.CompactTextString(m) } +func (*EventUpdateMetadata) ProtoMessage() {} +func (*EventUpdateMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_7a1b3dc9634155a0, []int{1} +} +func (m *EventUpdateMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventUpdateMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventUpdateMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventUpdateMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventUpdateMetadata.Merge(m, src) +} +func (m *EventUpdateMetadata) XXX_Size() int { + return m.Size() +} +func (m *EventUpdateMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_EventUpdateMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_EventUpdateMetadata proto.InternalMessageInfo + +func (m *EventUpdateMetadata) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventUpdateMetadata) GetMoniker() string { + if m != nil { + return m.Moniker + } + return "" +} + +func (m *EventUpdateMetadata) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +func (m *EventUpdateMetadata) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +// EventUpdateCommission ... +// emitted_by: EndBlock +type EventUpdateCommission struct { + // staker is the account address of the protocol node. + Staker string `protobuf:"bytes,1,opt,name=staker,proto3" json:"staker,omitempty"` + // commission ... + Commission string `protobuf:"bytes,2,opt,name=commission,proto3" json:"commission,omitempty"` +} + +func (m *EventUpdateCommission) Reset() { *m = EventUpdateCommission{} } +func (m *EventUpdateCommission) String() string { return proto.CompactTextString(m) } +func (*EventUpdateCommission) ProtoMessage() {} +func (*EventUpdateCommission) Descriptor() ([]byte, []int) { + return fileDescriptor_7a1b3dc9634155a0, []int{2} +} +func (m *EventUpdateCommission) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventUpdateCommission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventUpdateCommission.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventUpdateCommission) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventUpdateCommission.Merge(m, src) +} +func (m *EventUpdateCommission) XXX_Size() int { + return m.Size() +} +func (m *EventUpdateCommission) XXX_DiscardUnknown() { + xxx_messageInfo_EventUpdateCommission.DiscardUnknown(m) +} + +var xxx_messageInfo_EventUpdateCommission proto.InternalMessageInfo + +func (m *EventUpdateCommission) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventUpdateCommission) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +// EventJoinPool ... 
+// emitted_by: MsgJoinPool +type EventJoinPool struct { + // pool_id is the pool the staker joined + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the address of the staker + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // valaddress is the address of the protocol node which + // votes in favor of the staker + Valaddress string `protobuf:"bytes,3,opt,name=valaddress,proto3" json:"valaddress,omitempty"` + // amount is the amount of funds transferred to the valaddress + Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventJoinPool) Reset() { *m = EventJoinPool{} } +func (m *EventJoinPool) String() string { return proto.CompactTextString(m) } +func (*EventJoinPool) ProtoMessage() {} +func (*EventJoinPool) Descriptor() ([]byte, []int) { + return fileDescriptor_7a1b3dc9634155a0, []int{3} +} +func (m *EventJoinPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventJoinPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventJoinPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventJoinPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventJoinPool.Merge(m, src) +} +func (m *EventJoinPool) XXX_Size() int { + return m.Size() +} +func (m *EventJoinPool) XXX_DiscardUnknown() { + xxx_messageInfo_EventJoinPool.DiscardUnknown(m) +} + +var xxx_messageInfo_EventJoinPool proto.InternalMessageInfo + +func (m *EventJoinPool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventJoinPool) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *EventJoinPool) GetValaddress() string { + if m != nil { + return m.Valaddress + } + return "" +} + +func (m *EventJoinPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventLeavePool ... +// emitted_by: EndBlock +type EventLeavePool struct { + // pool_id ... + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker ... 
+ Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` +} + +func (m *EventLeavePool) Reset() { *m = EventLeavePool{} } +func (m *EventLeavePool) String() string { return proto.CompactTextString(m) } +func (*EventLeavePool) ProtoMessage() {} +func (*EventLeavePool) Descriptor() ([]byte, []int) { + return fileDescriptor_7a1b3dc9634155a0, []int{4} +} +func (m *EventLeavePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventLeavePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventLeavePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventLeavePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventLeavePool.Merge(m, src) +} +func (m *EventLeavePool) XXX_Size() int { + return m.Size() +} +func (m *EventLeavePool) XXX_DiscardUnknown() { + xxx_messageInfo_EventLeavePool.DiscardUnknown(m) +} + +var xxx_messageInfo_EventLeavePool proto.InternalMessageInfo + +func (m *EventLeavePool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *EventLeavePool) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func init() { + proto.RegisterType((*EventCreateStaker)(nil), "kyve.stakers.v1beta1.EventCreateStaker") + proto.RegisterType((*EventUpdateMetadata)(nil), "kyve.stakers.v1beta1.EventUpdateMetadata") + proto.RegisterType((*EventUpdateCommission)(nil), "kyve.stakers.v1beta1.EventUpdateCommission") + proto.RegisterType((*EventJoinPool)(nil), "kyve.stakers.v1beta1.EventJoinPool") + proto.RegisterType((*EventLeavePool)(nil), "kyve.stakers.v1beta1.EventLeavePool") +} + +func init() { proto.RegisterFile("kyve/stakers/v1beta1/events.proto", fileDescriptor_7a1b3dc9634155a0) } + +var fileDescriptor_7a1b3dc9634155a0 = []byte{ + // 342 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcd, 0x4a, 0xeb, 0x40, + 0x14, 0xc7, 0x9b, 0xde, 0xd0, 0x72, 0x07, 0xee, 0x85, 0x9b, 0xeb, 0x47, 0x56, 0x43, 0xcd, 0xaa, + 0x0b, 0x49, 0x28, 0x3e, 0x81, 0x96, 0x0a, 0x7e, 0x4b, 0x45, 0x41, 0x37, 0x32, 0x69, 0x0e, 0xed, + 0xd0, 0x24, 0x27, 0x64, 0x4e, 0xd3, 0xf6, 0x2d, 0x7c, 0x2c, 0x97, 0x5d, 0xba, 0x94, 0xf6, 0x45, + 0x24, 0xd3, 0xb1, 0x44, 0xa1, 0x1b, 0x77, 0xf9, 0x7f, 0xf0, 0xff, 0x91, 0xe1, 0xb0, 0x83, 0xf1, + 0xbc, 0x80, 0x40, 0x91, 0x18, 0x43, 0xae, 0x82, 0xa2, 0x13, 0x02, 0x89, 0x4e, 0x00, 0x05, 0xa4, + 0xa4, 0xfc, 0x2c, 0x47, 0x42, 0x67, 0xa7, 0xac, 0xf8, 0xa6, 0xe2, 0x9b, 0x8a, 0xd7, 0x65, 0xff, + 0x7a, 0x65, 0xab, 0x9b, 0x83, 0x20, 0xb8, 0xd3, 0xa9, 0xb3, 0xc7, 0x1a, 0xeb, 0x9e, 0x6b, 0xb5, + 0xac, 0xf6, 0xef, 0xbe, 0x51, 0xa5, 0x2f, 0x12, 0x9c, 0xa4, 0xe4, 0xd6, 0x5b, 0x56, 0xdb, 0xee, + 0x1b, 0xe5, 0x4d, 0xd8, 0x7f, 0x3d, 0x72, 0x9f, 0x45, 0x82, 0xe0, 0x0a, 0x48, 0x44, 0x82, 0xc4, + 0xd6, 0x19, 0x97, 0x35, 0x13, 0x4c, 0x65, 0x19, 0xd4, 0x75, 0xf0, 0x29, 0xcb, 0x64, 0x0a, 0xa1, + 0x92, 0x04, 0xee, 0xaf, 0x75, 0x62, 0xa4, 0xe3, 0x30, 0x3b, 0xc6, 0x21, 0xba, 0xb6, 0xb6, 0xf5, + 0xb7, 0x77, 0xc3, 0x76, 0x2b, 0xd8, 0x2e, 0x26, 0x89, 0x54, 0x4a, 0x62, 0xba, 0x15, 0xcc, 0x19, + 0x1b, 0x6c, 0x5a, 0x86, 0x5d, 0x71, 0xbc, 0x19, 0xfb, 0xa3, 0x07, 0xcf, 0x51, 0xa6, 0xb7, 0x88, + 0xb1, 0xb3, 0xcf, 0x9a, 0x19, 0x62, 0xfc, 0x2c, 0x23, 0xbd, 0x64, 0xf7, 0x1b, 0xa5, 0x3c, 0x8b, + 0x2a, 0x84, 0xfa, 0x77, 0x42, 0x21, 0x62, 0x11, 0x45, 0x39, 0x28, 0x65, 0xfe, 0xa1, 0xe2, 
0x54, + 0x5e, 0xd0, 0xfe, 0xf2, 0x82, 0xc7, 0xec, 0xaf, 0x26, 0x5f, 0x82, 0x28, 0xe0, 0x47, 0xe8, 0x93, + 0xd3, 0xd7, 0x25, 0xb7, 0x16, 0x4b, 0x6e, 0xbd, 0x2f, 0xb9, 0xf5, 0xb2, 0xe2, 0xb5, 0xc5, 0x8a, + 0xd7, 0xde, 0x56, 0xbc, 0xf6, 0x74, 0x38, 0x94, 0x34, 0x9a, 0x84, 0xfe, 0x00, 0x93, 0xe0, 0xe2, + 0xf1, 0xa1, 0x77, 0x0d, 0x34, 0xc5, 0x7c, 0x1c, 0x0c, 0x46, 0x42, 0xa6, 0xc1, 0x6c, 0x73, 0x36, + 0x34, 0xcf, 0x40, 0x85, 0x0d, 0x7d, 0x2e, 0x47, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x2b, + 0x0f, 0x0f, 0x53, 0x02, 0x00, 0x00, +} + +func (m *EventCreateStaker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventCreateStaker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventCreateStaker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventUpdateMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventUpdateMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventUpdateMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x1a + } + if len(m.Moniker) > 0 { + i -= len(m.Moniker) + copy(dAtA[i:], m.Moniker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Moniker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventUpdateCommission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventUpdateCommission) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventUpdateCommission) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0x12 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventJoinPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} 
+ +func (m *EventJoinPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventJoinPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x20 + } + if len(m.Valaddress) > 0 { + i -= len(m.Valaddress) + copy(dAtA[i:], m.Valaddress) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Valaddress))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventLeavePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventLeavePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventLeavePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventCreateStaker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventUpdateMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Moniker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventUpdateCommission) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventJoinPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + l = len(m.Valaddress) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventLeavePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovEvents(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} 
+func (m *EventCreateStaker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventCreateStaker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventCreateStaker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventUpdateMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventUpdateMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventUpdateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Moniker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventUpdateCommission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventUpdateCommission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventUpdateCommission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventJoinPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventJoinPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventJoinPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Valaddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Valaddress = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventLeavePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventLeavePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventLeavePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/expected_keepers.go b/x/stakers/types/expected_keepers.go new file mode 100644 index 00000000..dcf2d185 --- /dev/null +++ b/x/stakers/types/expected_keepers.go @@ -0,0 +1,39 @@ +package types + +import ( + poolTypes "github.com/KYVENetwork/chain/x/pool/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/upgrade/types" +) + +// AccountKeeper defines the expected account keeper used for simulations (noalias) +type AccountKeeper interface { + GetModuleAddress(moduleName string) sdk.AccAddress +} + +type DistrKeeper interface { + FundCommunityPool(ctx sdk.Context, amount sdk.Coins, sender sdk.AccAddress) error +} + +// BankKeeper defines the expected interface needed to retrieve account balances. +type BankKeeper interface { + SendCoins(ctx sdk.Context, fromAddr sdk.AccAddress, toAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToAccount(ctx sdk.Context, senderModule string, recipientAddr sdk.AccAddress, amt sdk.Coins) error + SendCoinsFromModuleToModule(ctx sdk.Context, senderModule, recipientModule string, amt sdk.Coins) error + SendCoinsFromAccountToModule(ctx sdk.Context, senderAddr sdk.AccAddress, recipientModule string, amt sdk.Coins) error +} + +type PoolKeeper interface { + AssertPoolExists(ctx sdk.Context, poolId uint64) error + GetPoolWithError(ctx sdk.Context, poolId uint64) (poolTypes.Pool, error) +} + +type UpgradeKeeper interface { + ScheduleUpgrade(ctx sdk.Context, plan types.Plan) error +} + +type DelegationKeeper interface { + GetDelegationAmount(ctx sdk.Context, staker string) uint64 + GetDelegationAmountOfDelegator(ctx sdk.Context, stakerAddress string, delegatorAddress string) uint64 + GetStakersByDelegator(ctx sdk.Context, delegator string) []string +} diff --git a/x/stakers/types/genesis.go b/x/stakers/types/genesis.go new file mode 100644 index 00000000..dc1f54b3 --- /dev/null +++ b/x/stakers/types/genesis.go @@ -0,0 +1,69 @@ +package types + +import ( + "fmt" +) + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + } +} + +// Validate performs basic genesis state validation returning an error upon any failure. 
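+// It checks that every valaccount index is unique, that commission-change and
+// leave-pool queue entries fall within the recorded queue bounds, and that the
+// valaccounts flagged as leaving match the leave-pool queue entries exactly,
+// before delegating to Params.Validate().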
+func (gs GenesisState) Validate() error { + // Staker + stakerLeaving := make(map[string]bool) + + // Valaccounts + valaccountMap := make(map[string]struct{}) + for _, elem := range gs.ValaccountList { + index := string(ValaccountKey(elem.PoolId, elem.Staker)) + if _, ok := valaccountMap[index]; ok { + return fmt.Errorf("duplicated index for valaccount %v", elem) + } + valaccountMap[index] = struct{}{} + stakerLeaving[index] = elem.IsLeaving + } + + // Commission Change + commissionChangeMap := make(map[string]struct{}) + + for _, elem := range gs.CommissionChangeEntries { + index := string(CommissionChangeEntryKey(elem.Index)) + if _, ok := commissionChangeMap[index]; ok { + return fmt.Errorf("duplicated index for commission change entry %v", elem) + } + if elem.Index > gs.QueueStateCommission.HighIndex { + return fmt.Errorf("commission change entry index too high: %v", elem) + } + if elem.Index < gs.QueueStateCommission.LowIndex { + return fmt.Errorf("commission change entry index too low: %v", elem) + } + + commissionChangeMap[index] = struct{}{} + } + + // Leave Pool + for _, elem := range gs.LeavePoolEntries { + if elem.Index > gs.QueueStateLeave.HighIndex { + return fmt.Errorf("unbonding stake entry index too high: %v", elem) + } + if elem.Index < gs.QueueStateLeave.LowIndex { + return fmt.Errorf("unbonding stake entry index too low: %v", elem) + } + if !stakerLeaving[string(ValaccountKey(elem.PoolId, elem.Staker))] { + return fmt.Errorf("inconsistent staker leave: %v", elem) + } + stakerLeaving[string(ValaccountKey(elem.PoolId, elem.Staker))] = false + } + + for staker, isLeaving := range stakerLeaving { + if isLeaving { + return fmt.Errorf("inconsistent staker leave: %v", staker) + } + } + + return gs.Params.Validate() +} diff --git a/x/stakers/types/genesis.pb.go b/x/stakers/types/genesis.pb.go new file mode 100644 index 00000000..4b6b0473 --- /dev/null +++ b/x/stakers/types/genesis.pb.go @@ -0,0 +1,698 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the stakers module's genesis state. +type GenesisState struct { + // params defines all the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // staker_list ... + StakerList []Staker `protobuf:"bytes,2,rep,name=staker_list,json=stakerList,proto3" json:"staker_list"` + // valaccount_list ... + ValaccountList []Valaccount `protobuf:"bytes,3,rep,name=valaccount_list,json=valaccountList,proto3" json:"valaccount_list"` + // commission_change_entries ... + CommissionChangeEntries []CommissionChangeEntry `protobuf:"bytes,4,rep,name=commission_change_entries,json=commissionChangeEntries,proto3" json:"commission_change_entries"` + // queue_state_commission ... 
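// QueueStateCommission tracks the low and high indices of the pending
// commission-change queue; GenesisState.Validate (genesis.go) requires every
// commission_change_entries index to fall inside this range.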
+ QueueStateCommission QueueState `protobuf:"bytes,5,opt,name=queue_state_commission,json=queueStateCommission,proto3" json:"queue_state_commission"` + // leave_pool_entries ... + LeavePoolEntries []LeavePoolEntry `protobuf:"bytes,6,rep,name=leave_pool_entries,json=leavePoolEntries,proto3" json:"leave_pool_entries"` + // queue_state_leave ... + QueueStateLeave QueueState `protobuf:"bytes,7,opt,name=queue_state_leave,json=queueStateLeave,proto3" json:"queue_state_leave"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_0deb2ee89d595051, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetStakerList() []Staker { + if m != nil { + return m.StakerList + } + return nil +} + +func (m *GenesisState) GetValaccountList() []Valaccount { + if m != nil { + return m.ValaccountList + } + return nil +} + +func (m *GenesisState) GetCommissionChangeEntries() []CommissionChangeEntry { + if m != nil { + return m.CommissionChangeEntries + } + return nil +} + +func (m *GenesisState) GetQueueStateCommission() QueueState { + if m != nil { + return m.QueueStateCommission + } + return QueueState{} +} + +func (m *GenesisState) GetLeavePoolEntries() []LeavePoolEntry { + if m != nil { + return m.LeavePoolEntries + } + return nil +} + +func (m *GenesisState) GetQueueStateLeave() QueueState { + if m != nil { + return m.QueueStateLeave + } + return QueueState{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.stakers.v1beta1.GenesisState") +} + +func init() { + proto.RegisterFile("kyve/stakers/v1beta1/genesis.proto", fileDescriptor_0deb2ee89d595051) +} + +var fileDescriptor_0deb2ee89d595051 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x51, 0x6b, 0xda, 0x50, + 0x14, 0xc7, 0x93, 0xe9, 0x1c, 0x5c, 0xc7, 0xdc, 0x82, 0x6c, 0x99, 0x8c, 0xcc, 0xc9, 0x1e, 0x06, + 0x1b, 0x09, 0x6e, 0x6f, 0x7b, 0x54, 0x6c, 0x1f, 0x2a, 0xad, 0x55, 0x90, 0xb6, 0x14, 0xc2, 0x35, + 0x1c, 0xe2, 0xc5, 0x24, 0x37, 0xe6, 0xde, 0xa4, 0xf5, 0x5b, 0xf4, 0x63, 0xf9, 0xe8, 0x63, 0xa1, + 0x50, 0x8a, 0x7e, 0x91, 0x92, 0x9b, 0xdb, 0xc4, 0x87, 0xf8, 0xd0, 0x37, 0xbd, 0xe7, 0x77, 0x7e, + 0xe7, 0x7f, 0x6e, 0x2e, 0xea, 0x2c, 0x56, 0x09, 0x58, 0x8c, 0xe3, 0x05, 0x44, 0xcc, 0x4a, 0xba, + 0x33, 0xe0, 0xb8, 0x6b, 0xb9, 0x10, 0x00, 0x23, 0xcc, 0x0c, 0x23, 0xca, 0xa9, 0xd6, 0x4c, 0x19, + 0x53, 0x32, 0xa6, 0x64, 0x5a, 0x4d, 0x97, 0xba, 0x54, 0x00, 0x56, 0xfa, 0x2b, 0x63, 0x5b, 0x3f, + 0x4a, 0x7d, 0x21, 0x8e, 0xb0, 0x2f, 0x75, 0xad, 0xf2, 0x91, 0x2f, 0x7a, 0xc1, 0x74, 
0x1e, 0xaa, + 0xe8, 0xfd, 0x71, 0x16, 0x62, 0xc2, 0x31, 0x07, 0xed, 0x3f, 0xaa, 0x65, 0x12, 0x5d, 0x6d, 0xab, + 0xbf, 0xea, 0x7f, 0xbf, 0x99, 0x65, 0xa1, 0xcc, 0x91, 0x60, 0x7a, 0xd5, 0xf5, 0xe3, 0x77, 0x65, + 0x2c, 0x3b, 0xb4, 0x3e, 0xaa, 0x67, 0x9c, 0xed, 0x11, 0xc6, 0xf5, 0x37, 0xed, 0xca, 0x61, 0xc1, + 0x44, 0xfc, 0x97, 0x02, 0x94, 0x55, 0x87, 0x84, 0x71, 0xed, 0x0c, 0x35, 0x12, 0xec, 0x61, 0xc7, + 0xa1, 0x71, 0xc0, 0x33, 0x51, 0x45, 0x88, 0xda, 0xe5, 0xa2, 0x69, 0x0e, 0x4b, 0xd9, 0x87, 0xa2, + 0x5d, 0x08, 0x7d, 0xf4, 0xd5, 0xa1, 0xbe, 0x4f, 0x18, 0x23, 0x34, 0xb0, 0x9d, 0x39, 0x0e, 0x5c, + 0xb0, 0x21, 0xe0, 0x11, 0x01, 0xa6, 0x57, 0x85, 0xfa, 0x77, 0xb9, 0xba, 0x9f, 0xb7, 0xf5, 0x45, + 0xd7, 0x20, 0xe0, 0xd1, 0x4a, 0x4e, 0xf9, 0xe2, 0x94, 0x14, 0x09, 0x30, 0xed, 0x1a, 0x7d, 0x5e, + 0xc6, 0x10, 0x83, 0xcd, 0xd2, 0xfb, 0xb4, 0x0b, 0x4c, 0x7f, 0x2b, 0x2e, 0xf4, 0xc0, 0x1a, 0xe7, + 0x69, 0x8f, 0xf8, 0x04, 0x72, 0x40, 0x73, 0x99, 0x9f, 0x14, 0x39, 0xb4, 0x0b, 0xa4, 0x79, 0x80, + 0x13, 0xb0, 0x43, 0x4a, 0xbd, 0x7c, 0x8b, 0x9a, 0xd8, 0xe2, 0x67, 0xb9, 0x79, 0x98, 0xf2, 0x23, + 0x4a, 0xbd, 0xfd, 0xf8, 0x1f, 0xbd, 0xfd, 0xd3, 0x34, 0xf7, 0x18, 0x7d, 0xda, 0xcf, 0x2d, 0xea, + 0xfa, 0xbb, 0x57, 0x45, 0x6e, 0x14, 0x91, 0xc5, 0xd0, 0xde, 0xd1, 0x7a, 0x6b, 0xa8, 0x9b, 0xad, + 0xa1, 0x3e, 0x6d, 0x0d, 0xf5, 0x6e, 0x67, 0x28, 0x9b, 0x9d, 0xa1, 0xdc, 0xef, 0x0c, 0xe5, 0xea, + 0x8f, 0x4b, 0xf8, 0x3c, 0x9e, 0x99, 0x0e, 0xf5, 0xad, 0x93, 0xcb, 0xe9, 0xe0, 0x14, 0xf8, 0x0d, + 0x8d, 0x16, 0x96, 0x33, 0xc7, 0x24, 0xb0, 0x6e, 0xf3, 0x57, 0xcb, 0x57, 0x21, 0xb0, 0x59, 0x4d, + 0x3c, 0xd6, 0x7f, 0xcf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x65, 0x34, 0xdb, 0xba, 0x45, 0x03, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.QueueStateLeave.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.LeavePoolEntries) > 0 { + for iNdEx := len(m.LeavePoolEntries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.LeavePoolEntries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.QueueStateCommission.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.CommissionChangeEntries) > 0 { + for iNdEx := len(m.CommissionChangeEntries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CommissionChangeEntries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.ValaccountList) > 0 { + for iNdEx := len(m.ValaccountList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValaccountList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.StakerList) > 0 { + for iNdEx := 
len(m.StakerList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.StakerList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.StakerList) > 0 { + for _, e := range m.StakerList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.ValaccountList) > 0 { + for _, e := range m.ValaccountList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.CommissionChangeEntries) > 0 { + for _, e := range m.CommissionChangeEntries { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.QueueStateCommission.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.LeavePoolEntries) > 0 { + for _, e := range m.LeavePoolEntries { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.QueueStateLeave.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StakerList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StakerList = append(m.StakerList, Staker{}) + if err := m.StakerList[len(m.StakerList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValaccountList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValaccountList = append(m.ValaccountList, Valaccount{}) + if err := m.ValaccountList[len(m.ValaccountList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommissionChangeEntries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommissionChangeEntries = append(m.CommissionChangeEntries, CommissionChangeEntry{}) + if err := m.CommissionChangeEntries[len(m.CommissionChangeEntries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueStateCommission", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.QueueStateCommission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeavePoolEntries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LeavePoolEntries = append(m.LeavePoolEntries, LeavePoolEntry{}) + if err := m.LeavePoolEntries[len(m.LeavePoolEntries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueStateLeave", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.QueueStateLeave.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/keys.go b/x/stakers/types/keys.go new file mode 100644 index 00000000..01295ead --- /dev/null +++ b/x/stakers/types/keys.go @@ -0,0 +1,99 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +const ( + // ModuleName defines the module name + ModuleName = "stakers" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_stakers" +) + +var ( + // ParamsKey is the prefix for all module params defined in params.proto + ParamsKey = []byte{0x00} + + // StakerKeyPrefix is indexed by the staker address + // and contains all stakers regardless of the pool + // key -> StakerKeyPrefix | + StakerKeyPrefix = []byte{1} + + // ValaccountPrefix stores valaccount for each staker and pool + // ValaccountPrefix | | + ValaccountPrefix = []byte{2, 0} + // ValaccountPrefixIndex2 | | + ValaccountPrefixIndex2 = []byte{2, 1} + + // CommissionChangeEntryKeyPrefix | + CommissionChangeEntryKeyPrefix = []byte{4, 
0} + // CommissionChangeEntryKeyPrefixIndex2 | + CommissionChangeEntryKeyPrefixIndex2 = []byte{4, 1} + + // LeavePoolEntryKeyPrefix | + LeavePoolEntryKeyPrefix = []byte{5, 0} + // LeavePoolEntryKeyPrefixIndex2 | | + LeavePoolEntryKeyPrefixIndex2 = []byte{5, 1} + + ActiveStakerIndex = []byte{6} +) + +// ENUM aggregated data types +type STAKER_STATS string + +var STAKER_STATS_COUNT STAKER_STATS = "total_stakers" + +// ENUM queue types identifiers +type QUEUE_IDENTIFIER []byte + +var ( + QUEUE_IDENTIFIER_COMMISSION QUEUE_IDENTIFIER = []byte{30, 2} + QUEUE_IDENTIFIER_LEAVE QUEUE_IDENTIFIER = []byte{30, 3} +) + +const ( + MaxStakers = 50 + DefaultCommission = "0.9" +) + +// StakerKey returns the store Key to retrieve a Staker from the index fields +func StakerKey(staker string) []byte { + return util.GetByteKey(staker) +} + +func ValaccountKey(poolId uint64, staker string) []byte { + return util.GetByteKey(poolId, staker) +} + +func ValaccountKeyIndex2(staker string, poolId uint64) []byte { + return util.GetByteKey(staker, poolId) +} + +func CommissionChangeEntryKey(index uint64) []byte { + return util.GetByteKey(index) +} + +// Important: only one queue entry per staker is allowed at a time. +func CommissionChangeEntryKeyIndex2(staker string) []byte { + return util.GetByteKey(staker) +} + +func LeavePoolEntryKey(index uint64) []byte { + return util.GetByteKey(index) +} + +func LeavePoolEntryKeyIndex2(staker string, poolId uint64) []byte { + return util.GetByteKey(staker, poolId) +} + +func ActiveStakerKeyIndex(staker string) []byte { + return util.GetByteKey(staker) +} diff --git a/x/stakers/types/message_create_staker.go b/x/stakers/types/message_create_staker.go new file mode 100644 index 00000000..ef0c240c --- /dev/null +++ b/x/stakers/types/message_create_staker.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCreateStaker = "create_staker" + +var _ sdk.Msg = &MsgCreateStaker{} + +func (msg *MsgCreateStaker) Route() string { + return RouterKey +} + +func (msg *MsgCreateStaker) Type() string { + return TypeMsgCreateStaker +} + +func (msg *MsgCreateStaker) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCreateStaker) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCreateStaker) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/stakers/types/message_join_pool.go b/x/stakers/types/message_join_pool.go new file mode 100644 index 00000000..0dfbfd17 --- /dev/null +++ b/x/stakers/types/message_join_pool.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgJoinPool = "join_pool" + +var _ sdk.Msg = &MsgJoinPool{} + +func (msg *MsgJoinPool) Route() string { + return RouterKey +} + +func (msg *MsgJoinPool) Type() string { + return TypeMsgJoinPool +} + +func (msg *MsgJoinPool) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgJoinPool) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) 
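	// MustSortJSON sorts the JSON keys so that the sign bytes are
	// deterministic across clients.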
+ return sdk.MustSortJSON(bz) +} + +func (msg *MsgJoinPool) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/stakers/types/message_leavel_pool.go b/x/stakers/types/message_leavel_pool.go new file mode 100644 index 00000000..cb1fc768 --- /dev/null +++ b/x/stakers/types/message_leavel_pool.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgLeavePool = "leave_pool" + +var _ sdk.Msg = &MsgLeavePool{} + +func (msg *MsgLeavePool) Route() string { + return RouterKey +} + +func (msg *MsgLeavePool) Type() string { + return TypeMsgLeavePool +} + +func (msg *MsgLeavePool) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgLeavePool) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgLeavePool) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/stakers/types/message_update_commission.go b/x/stakers/types/message_update_commission.go new file mode 100644 index 00000000..a9c82f32 --- /dev/null +++ b/x/stakers/types/message_update_commission.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgUpdateCommission = "update_commission" + +var _ sdk.Msg = &MsgUpdateCommission{} + +func (msg *MsgUpdateCommission) Route() string { + return RouterKey +} + +func (msg *MsgUpdateCommission) Type() string { + return TypeMsgUpdateCommission +} + +func (msg *MsgUpdateCommission) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgUpdateCommission) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgUpdateCommission) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + return nil +} diff --git a/x/stakers/types/message_update_metadata.go b/x/stakers/types/message_update_metadata.go new file mode 100644 index 00000000..94db5cab --- /dev/null +++ b/x/stakers/types/message_update_metadata.go @@ -0,0 +1,52 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgUpdateMetadata = "update_metadata" + +var _ sdk.Msg = &MsgUpdateMetadata{} + +func (msg *MsgUpdateMetadata) Route() string { + return RouterKey +} + +func (msg *MsgUpdateMetadata) Type() string { + return TypeMsgUpdateMetadata +} + +func (msg *MsgUpdateMetadata) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgUpdateMetadata) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgUpdateMetadata) ValidateBasic() error { + _, err := 
sdk.AccAddressFromBech32(msg.Creator) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid creator address (%s)", err) + } + + if len(msg.Logo) > 255 { + return sdkerrors.Wrapf(sdkerrors.ErrLogic, ErrStringMaxLengthExceeded.Error(), len(msg.Logo), 255) + } + + if len(msg.Website) > 255 { + return sdkerrors.Wrapf(sdkerrors.ErrLogic, ErrStringMaxLengthExceeded.Error(), len(msg.Website), 255) + } + + if len(msg.Moniker) > 255 { + return sdkerrors.Wrapf(sdkerrors.ErrLogic, ErrStringMaxLengthExceeded.Error(), len(msg.Moniker), 255) + } + + return nil +} diff --git a/x/stakers/types/msgs.go b/x/stakers/types/msgs.go new file mode 100644 index 00000000..e568d11e --- /dev/null +++ b/x/stakers/types/msgs.go @@ -0,0 +1,35 @@ +package types + +import ( + "encoding/json" + + "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var _ sdk.Msg = &MsgUpdateParams{} + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (msg *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errors.Wrap(err, "invalid authority address") + } + + params := DefaultParams() + if err := json.Unmarshal([]byte(msg.Payload), ¶ms); err != nil { + return err + } + + if err := params.Validate(); err != nil { + return err + } + + return nil +} diff --git a/x/stakers/types/params.go b/x/stakers/types/params.go new file mode 100644 index 00000000..76a01b9b --- /dev/null +++ b/x/stakers/types/params.go @@ -0,0 +1,43 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +// DefaultCommissionChangeTime ... +var DefaultCommissionChangeTime = uint64(60 * 60 * 24 * 5) + +// DefaultLeavePoolTime ... +var DefaultLeavePoolTime = uint64(60 * 60 * 24 * 5) + +// NewParams creates a new Params instance +func NewParams( + commissionChangeTime uint64, + leavePoolTime uint64, +) Params { + return Params{ + CommissionChangeTime: commissionChangeTime, + LeavePoolTime: leavePoolTime, + } +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams( + DefaultCommissionChangeTime, + DefaultLeavePoolTime, + ) +} + +// Validate validates the set of params +func (p Params) Validate() error { + if err := util.ValidateUint64(p.CommissionChangeTime); err != nil { + return err + } + + if err := util.ValidateUint64(p.LeavePoolTime); err != nil { + return err + } + + return nil +} diff --git a/x/stakers/types/params.pb.go b/x/stakers/types/params.pb.go new file mode 100644 index 00000000..d765da61 --- /dev/null +++ b/x/stakers/types/params.pb.go @@ -0,0 +1,340 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
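// The Params message below carries two durations, commission_change_time and
// leave_pool_time; their defaults in params.go are 60*60*24*5 = 432,000,
// i.e. five days when read as seconds.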
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the stakers module parameters. +type Params struct { + // commission_change_time ... + CommissionChangeTime uint64 `protobuf:"varint,1,opt,name=commission_change_time,json=commissionChangeTime,proto3" json:"commission_change_time,omitempty"` + // commission_change_time ... + LeavePoolTime uint64 `protobuf:"varint,2,opt,name=leave_pool_time,json=leavePoolTime,proto3" json:"leave_pool_time,omitempty"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_405cabd7005fc18b, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetCommissionChangeTime() uint64 { + if m != nil { + return m.CommissionChangeTime + } + return 0 +} + +func (m *Params) GetLeavePoolTime() uint64 { + if m != nil { + return m.LeavePoolTime + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "kyve.stakers.v1beta1.Params") +} + +func init() { proto.RegisterFile("kyve/stakers/v1beta1/params.proto", fileDescriptor_405cabd7005fc18b) } + +var fileDescriptor_405cabd7005fc18b = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcc, 0xae, 0x2c, 0x4b, + 0xd5, 0x2f, 0x2e, 0x49, 0xcc, 0x4e, 0x2d, 0x2a, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x01, + 0x29, 0xd1, 0x83, 0x2a, 0xd1, 0x83, 0x2a, 0x51, 0x4a, 0xe3, 0x62, 0x0b, 0x00, 0xab, 0x12, 0x32, + 0xe1, 0x12, 0x4b, 0xce, 0xcf, 0xcd, 0xcd, 0x2c, 0x2e, 0xce, 0xcc, 0xcf, 0x8b, 0x4f, 0xce, 0x48, + 0xcc, 0x4b, 0x4f, 0x8d, 0x2f, 0xc9, 0xcc, 0x4d, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x09, 0x12, + 0x41, 0xc8, 0x3a, 0x83, 0x25, 0x43, 0x32, 0x73, 0x53, 0x85, 0xd4, 0xb8, 0xf8, 0x73, 0x52, 0x13, + 0xcb, 0x52, 0xe3, 0x0b, 0xf2, 0xf3, 0x73, 0x20, 0xca, 0x99, 0xc0, 0xca, 0x79, 0xc1, 0xc2, 0x01, + 0xf9, 0xf9, 0x39, 0x20, 0x75, 0x4e, 0x6e, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, + 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, + 0x10, 0xa5, 0x93, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0xef, 0x1d, 0x19, + 0xe6, 0xea, 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0xad, 0x9f, 0x9c, 0x91, 0x98, 0x99, 0xa7, 0x5f, + 0x01, 0xf7, 0x54, 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x33, 0xc6, 0x80, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x44, 0x61, 0x13, 0x9a, 0xf1, 0x00, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LeavePoolTime != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.LeavePoolTime)) + i-- + dAtA[i] = 0x10 + } + if m.CommissionChangeTime != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.CommissionChangeTime)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CommissionChangeTime != 0 { + n += 1 + sovParams(uint64(m.CommissionChangeTime)) + } + if m.LeavePoolTime != 0 { + n += 1 + sovParams(uint64(m.LeavePoolTime)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CommissionChangeTime", wireType) + } + m.CommissionChangeTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CommissionChangeTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeavePoolTime", wireType) + } + m.LeavePoolTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeavePoolTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if 
dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/query.pb.go b/x/stakers/types/query.pb.go new file mode 100644 index 00000000..f7b98254 --- /dev/null +++ b/x/stakers/types/query.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6c1bf6f190db35c0, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params holds all the parameters of this module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6c1bf6f190db35c0, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "kyve.stakers.v1beta1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "kyve.stakers.v1beta1.QueryParamsResponse") +} + +func init() { proto.RegisterFile("kyve/stakers/v1beta1/query.proto", fileDescriptor_6c1bf6f190db35c0) } + +var fileDescriptor_6c1bf6f190db35c0 = []byte{ + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xc8, 0xae, 0x2c, 0x4b, + 0xd5, 0x2f, 0x2e, 0x49, 0xcc, 0x4e, 0x2d, 0x2a, 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x01, 0xa9, + 0xd0, 0x83, 0xaa, 0xd0, 0x83, 0xaa, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd0, 0x07, + 0xb1, 0x20, 0x6a, 0xa5, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, + 0x13, 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0xa1, 0xb2, 0x8a, 0x58, 0xed, + 0x2a, 0x48, 0x2c, 0x4a, 0xcc, 0x85, 0x2a, 0x51, 0x12, 0xe1, 0x12, 0x0a, 0x04, 0xd9, 0x1d, 0x00, + 0x16, 0x0c, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x51, 0x0a, 0xe4, 0x12, 0x46, 0x11, 0x2d, 0x2e, + 0xc8, 0xcf, 0x2b, 0x4e, 0x15, 0xb2, 0xe2, 0x62, 0x83, 0x68, 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, + 0x36, 0x92, 0xd1, 0xc3, 0xe6, 0x54, 0x3d, 0x88, 0x2e, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, + 0xa0, 0x3a, 0x8c, 0x7a, 0x19, 0xb9, 0x58, 0xc1, 0x66, 0x0a, 0x35, 0x33, 0x72, 0xb1, 0x41, 0x94, + 0x08, 0x69, 0x60, 0x37, 0x00, 0xd3, 0x45, 0x52, 0x9a, 0x44, 0xa8, 0x84, 0xb8, 0x52, 0x49, 0xa5, + 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x72, 0x42, 0x32, 0xfa, 0x78, 0xbc, 0xef, 0xe4, 0x76, 0xe2, 0x91, + 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, + 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x3a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, + 0xc9, 0xf9, 0xb9, 0xfa, 0xde, 0x91, 0x61, 0xae, 0x7e, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, 0xfa, + 0xc9, 0x19, 0x89, 0x99, 0x79, 0xfa, 0x15, 0x70, 0x03, 0x4b, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, + 0xc0, 0xe1, 0x68, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x3e, 0x8d, 0x82, 0xd8, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
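// A minimal usage sketch for the generated client, assuming a node exposing
// gRPC on localhost:9090; the address and the insecure credentials are
// illustrative assumptions (imports: context, fmt, google.golang.org/grpc,
// google.golang.org/grpc/credentials/insecure):
//
//	conn, err := grpc.Dial("localhost:9090",
//		grpc.WithTransportCredentials(insecure.NewCredentials()))
//	if err != nil {
//		panic(err)
//	}
//	defer conn.Close()
//
//	client := NewQueryClient(conn)
//	res, err := client.Params(context.Background(), &QueryParamsRequest{})
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(res.Params.CommissionChangeTime, res.Params.LeavePoolTime)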
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Parameters queries the parameters of the module. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Parameters queries the parameters of the module. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.stakers.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/stakers/v1beta1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/query.pb.gw.go b/x/stakers/types/query.pb.gw.go new file mode 100644 index 00000000..d727b58a --- /dev/null +++ b/x/stakers/types/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: kyve/stakers/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
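For the stakers module only one route is wired up here: GET
/kyve/stakers/v1beta1/params, which forwards to the Query/Params RPC (see
pattern_Query_Params_0 at the end of this file). Assuming the default Cosmos
SDK API address, that endpoint would be reachable as
http://localhost:1317/kyve/stakers/v1beta1/params.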
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
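	// grpc.Dial is non-blocking by default, so an error here usually means a
	// malformed target or option; actual connection failures surface on the
	// first RPC.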
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "stakers", "v1beta1", "params"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/x/stakers/types/stakers.pb.go b/x/stakers/types/stakers.pb.go new file mode 100644 index 00000000..11d4cc75 --- /dev/null +++ b/x/stakers/types/stakers.pb.go @@ -0,0 +1,1670 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/stakers.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Staker contains all metadata for a staker +// Every address can only create one staker (itself) +type Staker struct { + // address ... + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // commission ... 
+ Commission string `protobuf:"bytes,2,opt,name=commission,proto3" json:"commission,omitempty"` + // moniker ... + Moniker string `protobuf:"bytes,3,opt,name=moniker,proto3" json:"moniker,omitempty"` + // website ... + Website string `protobuf:"bytes,4,opt,name=website,proto3" json:"website,omitempty"` + // logo ... + Logo string `protobuf:"bytes,5,opt,name=logo,proto3" json:"logo,omitempty"` +} + +func (m *Staker) Reset() { *m = Staker{} } +func (m *Staker) String() string { return proto.CompactTextString(m) } +func (*Staker) ProtoMessage() {} +func (*Staker) Descriptor() ([]byte, []int) { + return fileDescriptor_d209d1a2a74d375d, []int{0} +} +func (m *Staker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Staker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Staker.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Staker) XXX_Merge(src proto.Message) { + xxx_messageInfo_Staker.Merge(m, src) +} +func (m *Staker) XXX_Size() int { + return m.Size() +} +func (m *Staker) XXX_DiscardUnknown() { + xxx_messageInfo_Staker.DiscardUnknown(m) +} + +var xxx_messageInfo_Staker proto.InternalMessageInfo + +func (m *Staker) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func (m *Staker) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +func (m *Staker) GetMoniker() string { + if m != nil { + return m.Moniker + } + return "" +} + +func (m *Staker) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +func (m *Staker) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +// Valaccount gets authorized by a staker to +// vote in a given pool on behalf of the staker. +type Valaccount struct { + // pool_id defines the pool in which the address + // is allowed to vote. + PoolId uint64 `protobuf:"varint,1,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // staker is the address the valaccount is voting for. + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // valaddress is the account stored on the protocol + // node which votes for the staker in the given pool. + Valaddress string `protobuf:"bytes,3,opt,name=valaddress,proto3" json:"valaddress,omitempty"` + // When a node is inactive (does not vote at all), + // a point is added. After a certain number of points + // is reached, the node gets kicked out. + Points uint64 `protobuf:"varint,4,opt,name=points,proto3" json:"points,omitempty"` + // isLeaving indicates if a staker is leaving the given pool.
+ IsLeaving bool `protobuf:"varint,5,opt,name=is_leaving,json=isLeaving,proto3" json:"is_leaving,omitempty"` +} + +func (m *Valaccount) Reset() { *m = Valaccount{} } +func (m *Valaccount) String() string { return proto.CompactTextString(m) } +func (*Valaccount) ProtoMessage() {} +func (*Valaccount) Descriptor() ([]byte, []int) { + return fileDescriptor_d209d1a2a74d375d, []int{1} +} +func (m *Valaccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Valaccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Valaccount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Valaccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Valaccount.Merge(m, src) +} +func (m *Valaccount) XXX_Size() int { + return m.Size() +} +func (m *Valaccount) XXX_DiscardUnknown() { + xxx_messageInfo_Valaccount.DiscardUnknown(m) +} + +var xxx_messageInfo_Valaccount proto.InternalMessageInfo + +func (m *Valaccount) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *Valaccount) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *Valaccount) GetValaddress() string { + if m != nil { + return m.Valaddress + } + return "" +} + +func (m *Valaccount) GetPoints() uint64 { + if m != nil { + return m.Points + } + return 0 +} + +func (m *Valaccount) GetIsLeaving() bool { + if m != nil { + return m.IsLeaving + } + return false +} + +// CommissionChangeEntry stores the information for an +// upcoming commission change. A commission change is never +// instant, so delegators have time to redelegate in case +// they don't agree with the new commission. +type CommissionChangeEntry struct { + // index is needed for the queue-algorithm which + // processes the commission changes + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + // staker is the address of the affected staker + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // commission is the new commission which will + // be applied after the waiting time is over. + Commission string `protobuf:"bytes,3,opt,name=commission,proto3" json:"commission,omitempty"` + // creation_date is the UNIX-timestamp in seconds + // when the entry was created. 
+ CreationDate int64 `protobuf:"varint,4,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` +} + +func (m *CommissionChangeEntry) Reset() { *m = CommissionChangeEntry{} } +func (m *CommissionChangeEntry) String() string { return proto.CompactTextString(m) } +func (*CommissionChangeEntry) ProtoMessage() {} +func (*CommissionChangeEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_d209d1a2a74d375d, []int{2} +} +func (m *CommissionChangeEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommissionChangeEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommissionChangeEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CommissionChangeEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommissionChangeEntry.Merge(m, src) +} +func (m *CommissionChangeEntry) XXX_Size() int { + return m.Size() +} +func (m *CommissionChangeEntry) XXX_DiscardUnknown() { + xxx_messageInfo_CommissionChangeEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_CommissionChangeEntry proto.InternalMessageInfo + +func (m *CommissionChangeEntry) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *CommissionChangeEntry) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *CommissionChangeEntry) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +func (m *CommissionChangeEntry) GetCreationDate() int64 { + if m != nil { + return m.CreationDate + } + return 0 +} + +// LeavePoolEntry stores the information for an upcoming +// pool leave. A staker can't leave a pool instantly. +// Instead, the `LeaveTime` needs to be awaited. +// If a staker starts to leave a pool, it will be shown +// in the UI to the delegators. +type LeavePoolEntry struct { + // index is needed for the queue-algorithm which + // processes the pool leave entries + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + // staker is the address of the affected staker + Staker string `protobuf:"bytes,2,opt,name=staker,proto3" json:"staker,omitempty"` + // pool_id indicates the pool the staker wants to leave + PoolId uint64 `protobuf:"varint,3,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // creation_date is the UNIX-timestamp in seconds + // when the entry was created.
+ CreationDate int64 `protobuf:"varint,4,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` +} + +func (m *LeavePoolEntry) Reset() { *m = LeavePoolEntry{} } +func (m *LeavePoolEntry) String() string { return proto.CompactTextString(m) } +func (*LeavePoolEntry) ProtoMessage() {} +func (*LeavePoolEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_d209d1a2a74d375d, []int{3} +} +func (m *LeavePoolEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeavePoolEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeavePoolEntry.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeavePoolEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeavePoolEntry.Merge(m, src) +} +func (m *LeavePoolEntry) XXX_Size() int { + return m.Size() +} +func (m *LeavePoolEntry) XXX_DiscardUnknown() { + xxx_messageInfo_LeavePoolEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_LeavePoolEntry proto.InternalMessageInfo + +func (m *LeavePoolEntry) GetIndex() uint64 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *LeavePoolEntry) GetStaker() string { + if m != nil { + return m.Staker + } + return "" +} + +func (m *LeavePoolEntry) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *LeavePoolEntry) GetCreationDate() int64 { + if m != nil { + return m.CreationDate + } + return 0 +} + +// QueueState stores the state for the queues of this module (e.g. pending commission changes and pool leaves). +type QueueState struct { + // low_index is the tail of the queue. It is the + // oldest entry in the queue. If this entry isn't + // due, none of the other entries is. + LowIndex uint64 `protobuf:"varint,1,opt,name=low_index,json=lowIndex,proto3" json:"low_index,omitempty"` + // high_index is the head of the queue. New entries + // are added to the top.
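+ // Entries with indices between low_index and high_index are still waiting to be processed.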
+ HighIndex uint64 `protobuf:"varint,2,opt,name=high_index,json=highIndex,proto3" json:"high_index,omitempty"` +} + +func (m *QueueState) Reset() { *m = QueueState{} } +func (m *QueueState) String() string { return proto.CompactTextString(m) } +func (*QueueState) ProtoMessage() {} +func (*QueueState) Descriptor() ([]byte, []int) { + return fileDescriptor_d209d1a2a74d375d, []int{4} +} +func (m *QueueState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueueState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueueState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueueState) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueueState.Merge(m, src) +} +func (m *QueueState) XXX_Size() int { + return m.Size() +} +func (m *QueueState) XXX_DiscardUnknown() { + xxx_messageInfo_QueueState.DiscardUnknown(m) +} + +var xxx_messageInfo_QueueState proto.InternalMessageInfo + +func (m *QueueState) GetLowIndex() uint64 { + if m != nil { + return m.LowIndex + } + return 0 +} + +func (m *QueueState) GetHighIndex() uint64 { + if m != nil { + return m.HighIndex + } + return 0 +} + +func init() { + proto.RegisterType((*Staker)(nil), "kyve.stakers.v1beta1.Staker") + proto.RegisterType((*Valaccount)(nil), "kyve.stakers.v1beta1.Valaccount") + proto.RegisterType((*CommissionChangeEntry)(nil), "kyve.stakers.v1beta1.CommissionChangeEntry") + proto.RegisterType((*LeavePoolEntry)(nil), "kyve.stakers.v1beta1.LeavePoolEntry") + proto.RegisterType((*QueueState)(nil), "kyve.stakers.v1beta1.QueueState") +} + +func init() { + proto.RegisterFile("kyve/stakers/v1beta1/stakers.proto", fileDescriptor_d209d1a2a74d375d) +} + +var fileDescriptor_d209d1a2a74d375d = []byte{ + // 429 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x9b, 0x6d, 0xb7, 0xbb, 0x7d, 0xa8, 0x87, 0x61, 0xd5, 0x80, 0x6c, 0x90, 0x7a, 0xf1, + 0x20, 0x0d, 0x8b, 0xdf, 0xc0, 0x75, 0xc5, 0x45, 0x11, 0xed, 0xc2, 0x82, 0x5e, 0xca, 0x34, 0x79, + 0x24, 0x43, 0xa7, 0xf3, 0x42, 0x66, 0x92, 0x6c, 0xc1, 0x4f, 0xe0, 0xc9, 0x83, 0x1f, 0xca, 0xe3, + 0x1e, 0x3d, 0x4a, 0xfb, 0x45, 0x64, 0x26, 0x93, 0x12, 0x11, 0x44, 0xbc, 0xe5, 0xff, 0xff, 0xcf, + 0xcb, 0xfc, 0xde, 0x9f, 0x81, 0xe9, 0x6a, 0x53, 0x63, 0xac, 0x0d, 0x5f, 0x61, 0xa9, 0xe3, 0xfa, + 0x6c, 0x89, 0x86, 0x9f, 0x75, 0x7a, 0x56, 0x94, 0x64, 0x88, 0x9d, 0xd8, 0x33, 0xb3, 0xce, 0xf3, + 0x67, 0xa6, 0x5f, 0x02, 0x18, 0x5f, 0x39, 0x8f, 0x85, 0x70, 0xc4, 0xd3, 0xb4, 0x44, 0xad, 0xc3, + 0xe0, 0x71, 0xf0, 0x74, 0x32, 0xef, 0x24, 0x8b, 0x00, 0x12, 0x5a, 0xaf, 0x85, 0xd6, 0x82, 0x54, + 0x78, 0xe0, 0xc2, 0x9e, 0x63, 0x27, 0xd7, 0xa4, 0xc4, 0x0a, 0xcb, 0x70, 0xd8, 0x4e, 0x7a, 0x69, + 0x93, 0x06, 0x97, 0x5a, 0x18, 0x0c, 0x47, 0x6d, 0xe2, 0x25, 0x63, 0x30, 0x92, 0x94, 0x51, 0x78, + 0xe8, 0x6c, 0xf7, 0x3d, 0xfd, 0x16, 0x00, 0x5c, 0x73, 0xc9, 0x93, 0x84, 0x2a, 0x65, 0xd8, 0x43, + 0x38, 0x2a, 0x88, 0xe4, 0x42, 0xa4, 0x0e, 0x68, 0x34, 0x1f, 0x5b, 0x79, 0x99, 0xb2, 0x07, 0x30, + 0x6e, 0xf7, 0xf0, 0x2c, 0x5e, 0x59, 0xce, 0x9a, 0xcb, 0x6e, 0x89, 0x16, 0xa5, 0xe7, 0xd8, 0xb9, + 0x82, 0x84, 0x32, 0xda, 0xc1, 0xb8, 0xff, 0x59, 0xc5, 0x4e, 0x01, 0x84, 0x5e, 0x48, 0xe4, 0xb5, + 0x50, 0x99, 0x23, 0x3a, 0x9e, 0x4f, 0x84, 0x7e, 0xdb, 0x1a, 0xb6, 0xa3, 0xfb, 0xe7, 0xfb, 0x6d, + 0xcf, 0x73, 0xae, 0x32, 0xbc, 0x50, 0xa6, 0xdc, 0xb0, 0x13, 0x38, 
0x14, 0x2a, 0xc5, 0x1b, 0xcf, + 0xd7, 0x8a, 0xbf, 0xe1, 0xf5, 0x6a, 0x1c, 0xfe, 0x51, 0xe3, 0x13, 0xb8, 0x9b, 0x94, 0xc8, 0x8d, + 0x20, 0xb5, 0x48, 0xb9, 0xaf, 0x6c, 0x38, 0xbf, 0xd3, 0x99, 0x2f, 0xb9, 0xc1, 0xe9, 0x67, 0xb8, + 0x67, 0xb9, 0xf0, 0x3d, 0x91, 0xfc, 0x1f, 0x88, 0x5e, 0xa9, 0xc3, 0xdf, 0x4a, 0xfd, 0xa7, 0xdb, + 0x5f, 0x03, 0x7c, 0xa8, 0xb0, 0xc2, 0x2b, 0xc3, 0x0d, 0xb2, 0x47, 0x30, 0x91, 0xd4, 0x2c, 0xfa, + 0xb7, 0x1f, 0x4b, 0x6a, 0x2e, 0x1d, 0xc0, 0x29, 0x40, 0x2e, 0xb2, 0xdc, 0xa7, 0x07, 0x2e, 0x9d, + 0x58, 0xc7, 0xc5, 0x2f, 0x5e, 0x7d, 0xdf, 0x46, 0xc1, 0xed, 0x36, 0x0a, 0x7e, 0x6e, 0xa3, 0xe0, + 0xeb, 0x2e, 0x1a, 0xdc, 0xee, 0xa2, 0xc1, 0x8f, 0x5d, 0x34, 0xf8, 0xf4, 0x2c, 0x13, 0x26, 0xaf, + 0x96, 0xb3, 0x84, 0xd6, 0xf1, 0x9b, 0x8f, 0xd7, 0x17, 0xef, 0xd0, 0x34, 0x54, 0xae, 0xe2, 0x24, + 0xe7, 0x42, 0xc5, 0x37, 0xfb, 0x67, 0x6e, 0x36, 0x05, 0xea, 0xe5, 0xd8, 0xbd, 0xee, 0xe7, 0xbf, + 0x02, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x34, 0x43, 0xbf, 0x03, 0x03, 0x00, 0x00, +} + +func (m *Staker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Staker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Staker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x2a + } + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x22 + } + if len(m.Moniker) > 0 { + i -= len(m.Moniker) + copy(dAtA[i:], m.Moniker) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Moniker))) + i-- + dAtA[i] = 0x1a + } + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0x12 + } + if len(m.Address) > 0 { + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Valaccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Valaccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Valaccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsLeaving { + i-- + if m.IsLeaving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Points != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.Points)) + i-- + dAtA[i] = 0x20 + } + if len(m.Valaddress) > 0 { + i -= len(m.Valaddress) + copy(dAtA[i:], m.Valaddress) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Valaddress))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.PoolId != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CommissionChangeEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommissionChangeEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommissionChangeEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreationDate != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.CreationDate)) + i-- + dAtA[i] = 0x20 + } + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0x1a + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LeavePoolEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeavePoolEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeavePoolEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreationDate != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.CreationDate)) + i-- + dAtA[i] = 0x20 + } + if m.PoolId != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x18 + } + if len(m.Staker) > 0 { + i -= len(m.Staker) + copy(dAtA[i:], m.Staker) + i = encodeVarintStakers(dAtA, i, uint64(len(m.Staker))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueueState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueueState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueueState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HighIndex != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.HighIndex)) + i-- + dAtA[i] = 0x10 + } + if m.LowIndex != 0 { + i = encodeVarintStakers(dAtA, i, uint64(m.LowIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintStakers(dAtA []byte, offset int, v uint64) int { + offset -= sovStakers(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Staker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Moniker) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + return n +} + +func (m *Valaccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PoolId != 0 { + n += 1 + sovStakers(uint64(m.PoolId)) + } + l = len(m.Staker) + if l > 0 
{ + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Valaddress) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + if m.Points != 0 { + n += 1 + sovStakers(uint64(m.Points)) + } + if m.IsLeaving { + n += 2 + } + return n +} + +func (m *CommissionChangeEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovStakers(uint64(m.Index)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + if m.CreationDate != 0 { + n += 1 + sovStakers(uint64(m.CreationDate)) + } + return n +} + +func (m *LeavePoolEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovStakers(uint64(m.Index)) + } + l = len(m.Staker) + if l > 0 { + n += 1 + l + sovStakers(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovStakers(uint64(m.PoolId)) + } + if m.CreationDate != 0 { + n += 1 + sovStakers(uint64(m.CreationDate)) + } + return n +} + +func (m *QueueState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LowIndex != 0 { + n += 1 + sovStakers(uint64(m.LowIndex)) + } + if m.HighIndex != 0 { + n += 1 + sovStakers(uint64(m.HighIndex)) + } + return n +} + +func sovStakers(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStakers(x uint64) (n int) { + return sovStakers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Staker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Staker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Staker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Moniker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Valaccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Valaccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Valaccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Valaddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Valaddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Points", wireType) + } + m.Points = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Points |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsLeaving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsLeaving = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CommissionChangeEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CommissionChangeEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CommissionChangeEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationDate", wireType) + } + m.CreationDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationDate |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeavePoolEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeavePoolEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeavePoolEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Staker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStakers + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthStakers + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Staker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationDate", wireType) + } + m.CreationDate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreationDate |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueueState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueueState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueueState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LowIndex", wireType) + } + m.LowIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LowIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HighIndex", wireType) + } + m.HighIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStakers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HighIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStakers(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStakers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStakers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if 
iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStakers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStakers + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStakers + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStakers + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStakers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStakers = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStakers = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/tx.pb.go b/x/stakers/types/tx.pb.go new file mode 100644 index 00000000..5058724c --- /dev/null +++ b/x/stakers/types/tx.pb.go @@ -0,0 +1,2658 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/stakers/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateStaker defines an SDK message for creating a staker by staking the given amount. +type MsgCreateStaker struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // amount ...
+ Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgCreateStaker) Reset() { *m = MsgCreateStaker{} } +func (m *MsgCreateStaker) String() string { return proto.CompactTextString(m) } +func (*MsgCreateStaker) ProtoMessage() {} +func (*MsgCreateStaker) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{0} +} +func (m *MsgCreateStaker) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateStaker) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateStaker.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateStaker) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateStaker.Merge(m, src) +} +func (m *MsgCreateStaker) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateStaker) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateStaker.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateStaker proto.InternalMessageInfo + +func (m *MsgCreateStaker) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgCreateStaker) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgCreateStakerResponse defines the Msg/CreateStaker response type. +type MsgCreateStakerResponse struct { +} + +func (m *MsgCreateStakerResponse) Reset() { *m = MsgCreateStakerResponse{} } +func (m *MsgCreateStakerResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateStakerResponse) ProtoMessage() {} +func (*MsgCreateStakerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{1} +} +func (m *MsgCreateStakerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateStakerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateStakerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateStakerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateStakerResponse.Merge(m, src) +} +func (m *MsgCreateStakerResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateStakerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateStakerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateStakerResponse proto.InternalMessageInfo + +// MsgUpdateMetadata defines an SDK message for updating the staker metadata (moniker, website and logo). +type MsgUpdateMetadata struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // moniker ... + Moniker string `protobuf:"bytes,2,opt,name=moniker,proto3" json:"moniker,omitempty"` + // website ...
+ Website string `protobuf:"bytes,3,opt,name=website,proto3" json:"website,omitempty"` + // logo + Logo string `protobuf:"bytes,4,opt,name=logo,proto3" json:"logo,omitempty"` +} + +func (m *MsgUpdateMetadata) Reset() { *m = MsgUpdateMetadata{} } +func (m *MsgUpdateMetadata) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateMetadata) ProtoMessage() {} +func (*MsgUpdateMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{2} +} +func (m *MsgUpdateMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateMetadata.Merge(m, src) +} +func (m *MsgUpdateMetadata) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateMetadata proto.InternalMessageInfo + +func (m *MsgUpdateMetadata) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgUpdateMetadata) GetMoniker() string { + if m != nil { + return m.Moniker + } + return "" +} + +func (m *MsgUpdateMetadata) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +func (m *MsgUpdateMetadata) GetLogo() string { + if m != nil { + return m.Logo + } + return "" +} + +// MsgUpdateMetadataResponse defines the Msg/MsgUpdateMetadata response type. +type MsgUpdateMetadataResponse struct { +} + +func (m *MsgUpdateMetadataResponse) Reset() { *m = MsgUpdateMetadataResponse{} } +func (m *MsgUpdateMetadataResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateMetadataResponse) ProtoMessage() {} +func (*MsgUpdateMetadataResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{3} +} +func (m *MsgUpdateMetadataResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateMetadataResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateMetadataResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateMetadataResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateMetadataResponse.Merge(m, src) +} +func (m *MsgUpdateMetadataResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateMetadataResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateMetadataResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateMetadataResponse proto.InternalMessageInfo + +// MsgUpdateCommission ... +type MsgUpdateCommission struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // commission ... 
+ Commission string `protobuf:"bytes,2,opt,name=commission,proto3" json:"commission,omitempty"` +} + +func (m *MsgUpdateCommission) Reset() { *m = MsgUpdateCommission{} } +func (m *MsgUpdateCommission) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateCommission) ProtoMessage() {} +func (*MsgUpdateCommission) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{4} +} +func (m *MsgUpdateCommission) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateCommission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateCommission.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateCommission) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateCommission.Merge(m, src) +} +func (m *MsgUpdateCommission) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateCommission) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateCommission.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateCommission proto.InternalMessageInfo + +func (m *MsgUpdateCommission) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgUpdateCommission) GetCommission() string { + if m != nil { + return m.Commission + } + return "" +} + +// MsgUpdateCommissionResponse ... +type MsgUpdateCommissionResponse struct { +} + +func (m *MsgUpdateCommissionResponse) Reset() { *m = MsgUpdateCommissionResponse{} } +func (m *MsgUpdateCommissionResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateCommissionResponse) ProtoMessage() {} +func (*MsgUpdateCommissionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{5} +} +func (m *MsgUpdateCommissionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateCommissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateCommissionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateCommissionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateCommissionResponse.Merge(m, src) +} +func (m *MsgUpdateCommissionResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateCommissionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateCommissionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateCommissionResponse proto.InternalMessageInfo + +// MsgJoinPool ... +type MsgJoinPool struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // pool_id ... + PoolId uint64 `protobuf:"varint,2,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` + // valaddress ... + Valaddress string `protobuf:"bytes,3,opt,name=valaddress,proto3" json:"valaddress,omitempty"` + // amount ... 
+ Amount uint64 `protobuf:"varint,4,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *MsgJoinPool) Reset() { *m = MsgJoinPool{} } +func (m *MsgJoinPool) String() string { return proto.CompactTextString(m) } +func (*MsgJoinPool) ProtoMessage() {} +func (*MsgJoinPool) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{6} +} +func (m *MsgJoinPool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgJoinPool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgJoinPool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgJoinPool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgJoinPool.Merge(m, src) +} +func (m *MsgJoinPool) XXX_Size() int { + return m.Size() +} +func (m *MsgJoinPool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgJoinPool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgJoinPool proto.InternalMessageInfo + +func (m *MsgJoinPool) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgJoinPool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +func (m *MsgJoinPool) GetValaddress() string { + if m != nil { + return m.Valaddress + } + return "" +} + +func (m *MsgJoinPool) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// MsgJoinPoolResponse ... +type MsgJoinPoolResponse struct { +} + +func (m *MsgJoinPoolResponse) Reset() { *m = MsgJoinPoolResponse{} } +func (m *MsgJoinPoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgJoinPoolResponse) ProtoMessage() {} +func (*MsgJoinPoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{7} +} +func (m *MsgJoinPoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgJoinPoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgJoinPoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgJoinPoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgJoinPoolResponse.Merge(m, src) +} +func (m *MsgJoinPoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgJoinPoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgJoinPoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgJoinPoolResponse proto.InternalMessageInfo + +// MsgLeavePool ... +type MsgLeavePool struct { + // creator ... + Creator string `protobuf:"bytes,1,opt,name=creator,proto3" json:"creator,omitempty"` + // pool_id ... 
+ PoolId uint64 `protobuf:"varint,2,opt,name=pool_id,json=poolId,proto3" json:"pool_id,omitempty"` +} + +func (m *MsgLeavePool) Reset() { *m = MsgLeavePool{} } +func (m *MsgLeavePool) String() string { return proto.CompactTextString(m) } +func (*MsgLeavePool) ProtoMessage() {} +func (*MsgLeavePool) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{8} +} +func (m *MsgLeavePool) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgLeavePool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgLeavePool.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgLeavePool) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgLeavePool.Merge(m, src) +} +func (m *MsgLeavePool) XXX_Size() int { + return m.Size() +} +func (m *MsgLeavePool) XXX_DiscardUnknown() { + xxx_messageInfo_MsgLeavePool.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgLeavePool proto.InternalMessageInfo + +func (m *MsgLeavePool) GetCreator() string { + if m != nil { + return m.Creator + } + return "" +} + +func (m *MsgLeavePool) GetPoolId() uint64 { + if m != nil { + return m.PoolId + } + return 0 +} + +// MsgLeavePoolResponse ... +type MsgLeavePoolResponse struct { +} + +func (m *MsgLeavePoolResponse) Reset() { *m = MsgLeavePoolResponse{} } +func (m *MsgLeavePoolResponse) String() string { return proto.CompactTextString(m) } +func (*MsgLeavePoolResponse) ProtoMessage() {} +func (*MsgLeavePoolResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{9} +} +func (m *MsgLeavePoolResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgLeavePoolResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgLeavePoolResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgLeavePoolResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgLeavePoolResponse.Merge(m, src) +} +func (m *MsgLeavePoolResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgLeavePoolResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgLeavePoolResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgLeavePoolResponse proto.InternalMessageInfo + +// MsgUpdateParams defines an SDK message for updating the module parameters. +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // payload defines the x/stakers parameters to update.
+ Payload string `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{10} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetPayload() string { + if m != nil { + return m.Payload + } + return "" +} + +// MsgUpdateParamsResponse defines the Msg/UpdateParams response type. +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f52b730e69b9fb06, []int{11} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateStaker)(nil), "kyve.stakers.v1beta1.MsgCreateStaker") + proto.RegisterType((*MsgCreateStakerResponse)(nil), "kyve.stakers.v1beta1.MsgCreateStakerResponse") + proto.RegisterType((*MsgUpdateMetadata)(nil), "kyve.stakers.v1beta1.MsgUpdateMetadata") + proto.RegisterType((*MsgUpdateMetadataResponse)(nil), "kyve.stakers.v1beta1.MsgUpdateMetadataResponse") + proto.RegisterType((*MsgUpdateCommission)(nil), "kyve.stakers.v1beta1.MsgUpdateCommission") + proto.RegisterType((*MsgUpdateCommissionResponse)(nil), "kyve.stakers.v1beta1.MsgUpdateCommissionResponse") + proto.RegisterType((*MsgJoinPool)(nil), "kyve.stakers.v1beta1.MsgJoinPool") + proto.RegisterType((*MsgJoinPoolResponse)(nil), "kyve.stakers.v1beta1.MsgJoinPoolResponse") + proto.RegisterType((*MsgLeavePool)(nil), "kyve.stakers.v1beta1.MsgLeavePool") + proto.RegisterType((*MsgLeavePoolResponse)(nil), 
"kyve.stakers.v1beta1.MsgLeavePoolResponse") + proto.RegisterType((*MsgUpdateParams)(nil), "kyve.stakers.v1beta1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "kyve.stakers.v1beta1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("kyve/stakers/v1beta1/tx.proto", fileDescriptor_f52b730e69b9fb06) } + +var fileDescriptor_f52b730e69b9fb06 = []byte{ + // 568 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcb, 0x6e, 0xd3, 0x40, + 0x14, 0xad, 0x69, 0xd4, 0x90, 0x4b, 0xc4, 0xc3, 0x84, 0xd6, 0x71, 0x55, 0xab, 0x58, 0xaa, 0x68, + 0x11, 0xb5, 0x15, 0x90, 0xd8, 0xb7, 0x11, 0x48, 0x3c, 0x0c, 0x55, 0x2a, 0x10, 0x8f, 0x45, 0x35, + 0xb1, 0x47, 0x8e, 0x49, 0xec, 0x6b, 0x79, 0x26, 0x69, 0xf2, 0x17, 0x7c, 0x0c, 0x1f, 0xc1, 0xb2, + 0x62, 0xc5, 0x12, 0x25, 0x12, 0xdf, 0x81, 0xfc, 0x9a, 0x3c, 0x9a, 0x97, 0xd8, 0xe5, 0xcc, 0x3d, + 0xf7, 0x9c, 0x7b, 0x3d, 0x27, 0x03, 0x7b, 0xed, 0x41, 0x8f, 0x9a, 0x8c, 0x93, 0x36, 0x8d, 0x98, + 0xd9, 0xab, 0x35, 0x29, 0x27, 0x35, 0x93, 0xf7, 0x8d, 0x30, 0x42, 0x8e, 0x72, 0x25, 0x2e, 0x1b, + 0x59, 0xd9, 0xc8, 0xca, 0x6a, 0xd5, 0x46, 0xe6, 0x23, 0xbb, 0x48, 0x38, 0x66, 0x0a, 0xd2, 0x06, + 0xbd, 0x0e, 0x77, 0x2c, 0xe6, 0xd6, 0x23, 0x4a, 0x38, 0x3d, 0x4f, 0xda, 0x64, 0x05, 0x8a, 0x76, + 0x8c, 0x31, 0x52, 0xa4, 0x7d, 0xe9, 0xb0, 0xd4, 0xc8, 0xa1, 0xbc, 0x0d, 0x5b, 0xc4, 0xc7, 0x6e, + 0xc0, 0x95, 0x1b, 0xfb, 0xd2, 0x61, 0xa1, 0x91, 0x21, 0xbd, 0x0a, 0x3b, 0x33, 0x22, 0x0d, 0xca, + 0x42, 0x0c, 0x18, 0xd5, 0xbb, 0x70, 0xcf, 0x62, 0xee, 0x87, 0xd0, 0x21, 0x9c, 0x5a, 0x94, 0x13, + 0x87, 0x70, 0xb2, 0xc4, 0x41, 0x81, 0xa2, 0x8f, 0x81, 0xd7, 0xa6, 0x51, 0x62, 0x51, 0x6a, 0xe4, + 0x30, 0xae, 0x5c, 0xd2, 0x26, 0xf3, 0x38, 0x55, 0x36, 0xd3, 0x4a, 0x06, 0x65, 0x19, 0x0a, 0x1d, + 0x74, 0x51, 0x29, 0x24, 0xc7, 0xc9, 0x6f, 0x7d, 0x17, 0xaa, 0xd7, 0x6c, 0xc5, 0x4c, 0xef, 0xe1, + 0xbe, 0x28, 0xd6, 0xd1, 0xf7, 0x3d, 0xc6, 0x3c, 0x0c, 0x96, 0x4c, 0xa5, 0x01, 0xd8, 0x82, 0x97, + 0x0d, 0x36, 0x71, 0xa2, 0xef, 0xc1, 0xee, 0x1c, 0x41, 0xe1, 0xd7, 0x87, 0x5b, 0x16, 0x73, 0x5f, + 0xa3, 0x17, 0x9c, 0x21, 0x76, 0x96, 0xf8, 0xec, 0x40, 0x31, 0x44, 0xec, 0x5c, 0x78, 0x4e, 0xfe, + 0x81, 0x63, 0xf8, 0xca, 0x89, 0x07, 0xe8, 0x91, 0x0e, 0x71, 0x9c, 0x88, 0x32, 0x96, 0xed, 0x3f, + 0x71, 0x32, 0x71, 0x31, 0x85, 0xa9, 0x8b, 0x79, 0x90, 0x6c, 0x9a, 0x3b, 0x8b, 0x81, 0x4e, 0xa0, + 0x6c, 0x31, 0xf7, 0x2d, 0x25, 0x3d, 0xfa, 0x9f, 0x13, 0xe9, 0xdb, 0x50, 0x99, 0x94, 0x10, 0xd2, + 0x76, 0x92, 0xa7, 0xf4, 0x53, 0x9c, 0x91, 0x88, 0xf8, 0x4c, 0x7e, 0x0e, 0x25, 0xd2, 0xe5, 0x2d, + 0x8c, 0x3c, 0x3e, 0x48, 0xf5, 0x4f, 0x95, 0x5f, 0x3f, 0x8e, 0x2b, 0x59, 0x0e, 0x4f, 0xd2, 0x1d, + 0xce, 0x79, 0xe4, 0x05, 0x6e, 0x63, 0x4c, 0x8d, 0xa7, 0x0a, 0xc9, 0xa0, 0x83, 0xc4, 0xc9, 0xb3, + 0x90, 0xc1, 0x2c, 0x6f, 0x93, 0x26, 0xb9, 0xff, 0xd3, 0xbf, 0x05, 0xd8, 0xb4, 0x98, 0x2b, 0x3b, + 0x50, 0x9e, 0x0a, 0xf5, 0x81, 0x31, 0xef, 0x9f, 0x61, 0xcc, 0xc4, 0x56, 0x3d, 0x5e, 0x8b, 0x96, + 0xbb, 0xc9, 0xdf, 0xe0, 0xf6, 0x4c, 0xb4, 0x1f, 0x2d, 0x14, 0x98, 0x26, 0xaa, 0xe6, 0x9a, 0x44, + 0xe1, 0x15, 0xc2, 0xdd, 0x6b, 0x91, 0x3d, 0x5a, 0x21, 0x32, 0xa6, 0xaa, 0xb5, 0xb5, 0xa9, 0xc2, + 0xf1, 0x13, 0xdc, 0x14, 0xa1, 0x7d, 0xb8, 0xb0, 0x3d, 0xa7, 0xa8, 0x47, 0x2b, 0x29, 0x42, 0xf9, + 0x2b, 0x94, 0xc6, 0xe9, 0xd3, 0x17, 0xf6, 0x09, 0x8e, 0xfa, 0x78, 0x35, 0x47, 0x88, 0x3b, 0x50, + 0x9e, 0xca, 0xdf, 0xc1, 0x8a, 0xcd, 0x53, 0xda, 0x92, 0xab, 0x9f, 0x17, 0xb4, 0xd3, 0x97, 0x3f, + 0x87, 0x9a, 0x74, 0x35, 0xd4, 0xa4, 0x3f, 0x43, 0x4d, 0xfa, 0x3e, 0xd2, 0x36, 
0xae, 0x46, 0xda, + 0xc6, 0xef, 0x91, 0xb6, 0xf1, 0xe5, 0x89, 0xeb, 0xf1, 0x56, 0xb7, 0x69, 0xd8, 0xe8, 0x9b, 0x6f, + 0x3e, 0x7f, 0x7c, 0xf1, 0x8e, 0xf2, 0x4b, 0x8c, 0xda, 0xa6, 0xdd, 0x22, 0x5e, 0x60, 0xf6, 0xc5, + 0xe3, 0xcd, 0x07, 0x21, 0x65, 0xcd, 0xad, 0xe4, 0x1d, 0x7e, 0xf6, 0x2f, 0x00, 0x00, 0xff, 0xff, + 0x89, 0xab, 0x3b, 0x78, 0xd9, 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateStaker ... + CreateStaker(ctx context.Context, in *MsgCreateStaker, opts ...grpc.CallOption) (*MsgCreateStakerResponse, error) + // UpdateMetadata ... + UpdateMetadata(ctx context.Context, in *MsgUpdateMetadata, opts ...grpc.CallOption) (*MsgUpdateMetadataResponse, error) + // UpdateCommission ... + UpdateCommission(ctx context.Context, in *MsgUpdateCommission, opts ...grpc.CallOption) (*MsgUpdateCommissionResponse, error) + // JoinPool ... + JoinPool(ctx context.Context, in *MsgJoinPool, opts ...grpc.CallOption) (*MsgJoinPoolResponse, error) + // LeavePool ... + LeavePool(ctx context.Context, in *MsgLeavePool, opts ...grpc.CallOption) (*MsgLeavePoolResponse, error) + // UpdateParams defines a governance operation for updating the x/stakers module + // parameters. The authority is hard-coded to the x/gov module account. + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateStaker(ctx context.Context, in *MsgCreateStaker, opts ...grpc.CallOption) (*MsgCreateStakerResponse, error) { + out := new(MsgCreateStakerResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/CreateStaker", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateMetadata(ctx context.Context, in *MsgUpdateMetadata, opts ...grpc.CallOption) (*MsgUpdateMetadataResponse, error) { + out := new(MsgUpdateMetadataResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/UpdateMetadata", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateCommission(ctx context.Context, in *MsgUpdateCommission, opts ...grpc.CallOption) (*MsgUpdateCommissionResponse, error) { + out := new(MsgUpdateCommissionResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/UpdateCommission", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) JoinPool(ctx context.Context, in *MsgJoinPool, opts ...grpc.CallOption) (*MsgJoinPoolResponse, error) { + out := new(MsgJoinPoolResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/JoinPool", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) LeavePool(ctx context.Context, in *MsgLeavePool, opts ...grpc.CallOption) (*MsgLeavePoolResponse, error) { + out := new(MsgLeavePoolResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/LeavePool", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/kyve.stakers.v1beta1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateStaker ... + CreateStaker(context.Context, *MsgCreateStaker) (*MsgCreateStakerResponse, error) + // UpdateMetadata ... + UpdateMetadata(context.Context, *MsgUpdateMetadata) (*MsgUpdateMetadataResponse, error) + // UpdateCommission ... + UpdateCommission(context.Context, *MsgUpdateCommission) (*MsgUpdateCommissionResponse, error) + // JoinPool ... + JoinPool(context.Context, *MsgJoinPool) (*MsgJoinPoolResponse, error) + // LeavePool ... + LeavePool(context.Context, *MsgLeavePool) (*MsgLeavePoolResponse, error) + // UpdateParams defines a governance operation for updating the x/stakers module + // parameters. The authority is hard-coded to the x/gov module account. + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateStaker(ctx context.Context, req *MsgCreateStaker) (*MsgCreateStakerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateStaker not implemented") +} +func (*UnimplementedMsgServer) UpdateMetadata(ctx context.Context, req *MsgUpdateMetadata) (*MsgUpdateMetadataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateMetadata not implemented") +} +func (*UnimplementedMsgServer) UpdateCommission(ctx context.Context, req *MsgUpdateCommission) (*MsgUpdateCommissionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateCommission not implemented") +} +func (*UnimplementedMsgServer) JoinPool(ctx context.Context, req *MsgJoinPool) (*MsgJoinPoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method JoinPool not implemented") +} +func (*UnimplementedMsgServer) LeavePool(ctx context.Context, req *MsgLeavePool) (*MsgLeavePoolResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LeavePool not implemented") +} +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateStaker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateStaker) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateStaker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/CreateStaker", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateStaker(ctx, req.(*MsgCreateStaker)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, 
error) { + in := new(MsgUpdateMetadata) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateMetadata(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/UpdateMetadata", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateMetadata(ctx, req.(*MsgUpdateMetadata)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateCommission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateCommission) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateCommission(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/UpdateCommission", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateCommission(ctx, req.(*MsgUpdateCommission)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_JoinPool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgJoinPool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).JoinPool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/JoinPool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).JoinPool(ctx, req.(*MsgJoinPool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_LeavePool_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgLeavePool) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).LeavePool(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/LeavePool", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).LeavePool(ctx, req.(*MsgLeavePool)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.stakers.v1beta1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.stakers.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateStaker", + Handler: _Msg_CreateStaker_Handler, + }, + { + MethodName: "UpdateMetadata", + Handler: _Msg_UpdateMetadata_Handler, + }, + { + MethodName: "UpdateCommission", + Handler: _Msg_UpdateCommission_Handler, + }, + { + MethodName: "JoinPool", + Handler: _Msg_JoinPool_Handler, + }, + { + MethodName: "LeavePool", + Handler: _Msg_LeavePool_Handler, + }, + { + MethodName: 
"UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/stakers/v1beta1/tx.proto", +} + +func (m *MsgCreateStaker) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateStaker) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateStaker) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateStakerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateStakerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateStakerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Logo) > 0 { + i -= len(m.Logo) + copy(dAtA[i:], m.Logo) + i = encodeVarintTx(dAtA, i, uint64(len(m.Logo))) + i-- + dAtA[i] = 0x22 + } + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintTx(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x1a + } + if len(m.Moniker) > 0 { + i -= len(m.Moniker) + copy(dAtA[i:], m.Moniker) + i = encodeVarintTx(dAtA, i, uint64(len(m.Moniker))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateMetadataResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateMetadataResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateMetadataResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateCommission) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateCommission) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateCommission) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Commission) > 0 { + i -= len(m.Commission) + copy(dAtA[i:], m.Commission) + i = encodeVarintTx(dAtA, i, uint64(len(m.Commission))) + i-- + dAtA[i] = 0x12 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateCommissionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateCommissionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateCommissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgJoinPool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgJoinPool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgJoinPool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x20 + } + if len(m.Valaddress) > 0 { + i -= len(m.Valaddress) + copy(dAtA[i:], m.Valaddress) + i = encodeVarintTx(dAtA, i, uint64(len(m.Valaddress))) + i-- + dAtA[i] = 0x1a + } + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x10 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgJoinPoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgJoinPoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgJoinPoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgLeavePool) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgLeavePool) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgLeavePool) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PoolId != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.PoolId)) + i-- + dAtA[i] = 0x10 + } + if len(m.Creator) > 0 { + i -= len(m.Creator) + copy(dAtA[i:], m.Creator) + i = encodeVarintTx(dAtA, i, uint64(len(m.Creator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgLeavePoolResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*MsgLeavePoolResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgLeavePoolResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarintTx(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateStaker) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgCreateStakerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Moniker) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Logo) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateMetadataResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateCommission) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Commission) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateCommissionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgJoinPool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + l = len(m.Valaddress) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + return n +} + +func (m *MsgJoinPoolResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgLeavePool) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Creator) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.PoolId != 0 { + n += 1 + sovTx(uint64(m.PoolId)) + } + return n +} + +func (m *MsgLeavePoolResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateStaker) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateStaker: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateStaker: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateStakerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateStakerResponse: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateStakerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Moniker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Logo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Logo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateMetadataResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateMetadataResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateMetadataResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateCommission) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateCommission: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateCommission: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commission", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commission = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateCommissionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateCommissionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateCommissionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgJoinPool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgJoinPool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgJoinPool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Valaddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Valaddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgJoinPoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgJoinPoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgJoinPoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgLeavePool) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgLeavePool: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgLeavePool: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Creator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
PoolId", wireType) + } + m.PoolId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PoolId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgLeavePoolResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgLeavePoolResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgLeavePoolResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/stakers/types/types.go b/x/stakers/types/types.go new file mode 100644 index 00000000..ab1254f4 --- /dev/null +++ b/x/stakers/types/types.go @@ -0,0 +1 @@ +package types diff --git a/x/team/abci.go b/x/team/abci.go new file mode 100644 index 00000000..06c72bc3 --- /dev/null +++ b/x/team/abci.go @@ -0,0 +1,84 @@ +package team + +import ( + "fmt" + + "github.com/KYVENetwork/chain/util" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + sdk 
"github.com/cosmos/cosmos-sdk/types" + authTypes "github.com/cosmos/cosmos-sdk/x/auth/types" + + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // Mint + mintKeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + // Team + "github.com/KYVENetwork/chain/x/team/keeper" + // Upgrade + upgradeKeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" +) + +func DistributeTeamInflation(ctx sdk.Context, bk bankKeeper.Keeper, mk mintKeeper.Keeper, tk keeper.Keeper, uk upgradeKeeper.Keeper) { + // Compute team allocation of minted coins. + minter := mk.GetMinter(ctx) + params := mk.GetParams(ctx) + + // get total inflation rewards for current block + blockProvision := minter.BlockProvision(params) + + // calculate theoretical team balance. We don't use team module balance because a third party could skew + // the team inflation rewards by simply transferring funds to the team module account + teamBalance := tk.GetTeamInfo(ctx).RequiredModuleBalance + + // calculate total inflation rewards for team module. + // We subtract current inflation because it was already applied to the total supply because BeginBlocker + // x/mint runs before this method + totalSupply := bk.GetSupply(ctx, blockProvision.Denom).Amount.Int64() - blockProvision.Amount.Int64() + teamModuleRewardsShare := sdk.NewDec(int64(teamBalance)).Quo(sdk.NewDec(totalSupply)) + + // if team module balance is greater than total supply panic + if teamModuleRewardsShare.GT(sdk.NewDec(int64(1))) { + util.PanicHalt(uk, ctx, fmt.Sprintf("team module balance %v is higher than total supply %v", teamBalance, totalSupply)) + } + + // calculate the total reward in $KYVE the entire team module receives this block + teamModuleRewards := uint64(teamModuleRewardsShare.Mul(sdk.NewDec(blockProvision.Amount.Int64())).TruncateInt64()) + + // count total account rewards + totalAccountRewards := uint64(0) + + // distribute team module rewards between vesting accounts based on their vesting progress + for _, account := range tk.GetTeamVestingAccounts(ctx) { + // get current vesting progress + status := teamKeeper.GetVestingStatus(account, uint64(ctx.BlockTime().Unix())) + // calculate reward share of account + accountShare := sdk.NewDec(int64(status.TotalVestedAmount - account.UnlockedClaimed)).Quo(sdk.NewDec(int64(types.TEAM_ALLOCATION))) + // calculate total inflation rewards for account for this block + accountRewards := uint64(sdk.NewDec(int64(teamModuleRewards)).Mul(accountShare).TruncateInt64()) + + // save inflation rewards to account + account.TotalRewards += accountRewards + tk.SetTeamVestingAccount(ctx, account) + + // count total inflation rewards for team module + totalAccountRewards += accountRewards + } + + // panic if total account rewards are higher than team module rewards + if totalAccountRewards > teamModuleRewards { + util.PanicHalt(uk, ctx, fmt.Sprintf("account rewards %v are higher than entire team module rewards %v", totalAccountRewards, teamModuleRewards)) + } + + // track total authority inflation rewards + authority := tk.GetAuthority(ctx) + authority.TotalRewards += teamModuleRewards - totalAccountRewards + tk.SetAuthority(ctx, authority) + + // distribute part of block provision to team module + if err := util.TransferFromModuleToModule(bk, ctx, authTypes.FeeCollectorName, types.ModuleName, teamModuleRewards); err != nil { + util.PanicHalt(uk, ctx, err.Error()) + } + + tk.Logger(ctx).Info("distributed portion of minted coins", "amount", teamModuleRewards) +} diff --git a/x/team/client/cli/query.go 
b/x/team/client/cli/query.go new file mode 100644 index 00000000..77f75af3 --- /dev/null +++ b/x/team/client/cli/query.go @@ -0,0 +1,25 @@ +package cli + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/spf13/cobra" +) + +// GetQueryCmd returns the cli query commands for this module +func GetQueryCmd() *cobra.Command { + // Group fees queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + // TODO add missing queries + + return cmd +} diff --git a/x/team/client/cli/tx.go b/x/team/client/cli/tx.go new file mode 100644 index 00000000..fab2433c --- /dev/null +++ b/x/team/client/cli/tx.go @@ -0,0 +1,28 @@ +package cli + +import ( + "fmt" + + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/spf13/cobra" +) + +// GetTxCmd returns the transaction commands for this module +func GetTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("%s transactions subcommands", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CmdClaimUnlocked()) + cmd.AddCommand(CmdClawback()) + cmd.AddCommand(CmdCreateTeamVestingAccount()) + cmd.AddCommand(CmdClaimAuthorityRewards()) + cmd.AddCommand(CmdClaimAccountRewards()) + + return cmd +} diff --git a/x/team/client/cli/tx_claim_account_rewards.go b/x/team/client/cli/tx_claim_account_rewards.go new file mode 100644 index 00000000..1908fb06 --- /dev/null +++ b/x/team/client/cli/tx_claim_account_rewards.go @@ -0,0 +1,51 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdClaimAccountRewards() *cobra.Command { + cmd := &cobra.Command{ + Use: "claim-account-rewards [id] [amount] [recipient]", + Short: "Broadcast message claim-account-rewards", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argAmount, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + argRecipient := args[2] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgClaimAccountRewards{ + Authority: clientCtx.GetFromAddress().String(), + Id: argId, + Amount: argAmount, + Recipient: argRecipient, + } + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/team/client/cli/tx_claim_authority_rewards.go b/x/team/client/cli/tx_claim_authority_rewards.go new file mode 100644 index 00000000..9ee65bee --- /dev/null +++ b/x/team/client/cli/tx_claim_authority_rewards.go @@ -0,0 +1,45 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdClaimAuthorityRewards() *cobra.Command { + cmd := &cobra.Command{ + Use: "claim-authority-rewards 
[amount] [recipient]", + Short: "Broadcast message claim-authority-rewards", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argAmount, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argRecipient := args[1] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgClaimAuthorityRewards{ + Authority: clientCtx.GetFromAddress().String(), + Amount: argAmount, + Recipient: argRecipient, + } + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/team/client/cli/tx_claim_unlocked.go b/x/team/client/cli/tx_claim_unlocked.go new file mode 100644 index 00000000..be3399ef --- /dev/null +++ b/x/team/client/cli/tx_claim_unlocked.go @@ -0,0 +1,51 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdClaimUnlocked() *cobra.Command { + cmd := &cobra.Command{ + Use: "claim-unlocked [id] [amount] [recipient]", + Short: "Broadcast message claim-unlocked", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argAmount, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + argRecipient := args[2] + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgClaimUnlocked{ + Authority: clientCtx.GetFromAddress().String(), + Id: argId, + Amount: argAmount, + Recipient: argRecipient, + } + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/team/client/cli/tx_clawback.go b/x/team/client/cli/tx_clawback.go new file mode 100644 index 00000000..108eff13 --- /dev/null +++ b/x/team/client/cli/tx_clawback.go @@ -0,0 +1,48 @@ +package cli + +import ( + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdClawback() *cobra.Command { + cmd := &cobra.Command{ + Use: "clawback [id] [clawback]", + Short: "Broadcast message clawback", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argId, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argClawbackTimeStamp, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgClawback{ + Authority: clientCtx.GetFromAddress().String(), + Id: argId, + Clawback: argClawbackTimeStamp, + } + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/team/client/cli/tx_create_team_vesting_account.go b/x/team/client/cli/tx_create_team_vesting_account.go new file mode 100644 index 00000000..97fae666 --- /dev/null +++ b/x/team/client/cli/tx_create_team_vesting_account.go @@ -0,0 +1,48 @@ +package cli + 
+import ( + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/spf13/cast" + "github.com/spf13/cobra" +) + +func CmdCreateTeamVestingAccount() *cobra.Command { + cmd := &cobra.Command{ + Use: "create [total_allocation] [commencement]", + Short: "Broadcast message create-team-vesting-account", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) (err error) { + argAllocation, err := cast.ToUint64E(args[0]) + if err != nil { + return err + } + + argCommencementTimeStamp, err := cast.ToUint64E(args[1]) + if err != nil { + return err + } + + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + msg := types.MsgCreateTeamVestingAccount{ + Authority: clientCtx.GetFromAddress().String(), + TotalAllocation: argAllocation, + Commencement: argCommencementTimeStamp, + } + if err := msg.ValidateBasic(); err != nil { + return err + } + return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), &msg) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/x/team/genesis.go b/x/team/genesis.go new file mode 100644 index 00000000..151da4d8 --- /dev/null +++ b/x/team/genesis.go @@ -0,0 +1,29 @@ +package team + +import ( + "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// InitGenesis initializes the team module's state from a provided genesis state. +func InitGenesis(ctx sdk.Context, k keeper.Keeper, genState types.GenesisState) { + k.SetAuthority(ctx, genState.Authority) + + for _, elem := range genState.AccountList { + k.SetTeamVestingAccount(ctx, elem) + } + + k.SetTeamVestingAccountCount(ctx, genState.AccountCount) +} + +// ExportGenesis returns the team module's exported genesis. +func ExportGenesis(ctx sdk.Context, k keeper.Keeper) *types.GenesisState { + genesis := types.DefaultGenesis() + + genesis.Authority = k.GetAuthority(ctx) + genesis.AccountList = k.GetTeamVestingAccounts(ctx) + genesis.AccountCount = k.GetTeamVestingAccountCount(ctx) + + return genesis +} diff --git a/x/team/keeper/abci_inflation_rewards_test.go b/x/team/keeper/abci_inflation_rewards_test.go new file mode 100644 index 00000000..ce394e36 --- /dev/null +++ b/x/team/keeper/abci_inflation_rewards_test.go @@ -0,0 +1,118 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - abci.go + +* total_supply +* team_balance +* community_pool +* distribution + +*/ + +var _ = Describe("abci.go", Ordered, func() { + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("total_supply", func() { + // ARRANGE + b1, b2 := int64(0), int64(0) + + for t := 0; t < 100; t++ { + // ACT + b1 = s.App().BankKeeper.GetSupply(s.Ctx(), globalTypes.Denom).Amount.Int64() + s.Commit() + b2 = s.App().BankKeeper.GetSupply(s.Ctx(), globalTypes.Denom).Amount.Int64() + + // ASSERT + Expect(b2).To(BeNumerically(">", b1)) + } + }) + + It("team_balance", func() { + // ARRANGE + b1, b2 := uint64(0), uint64(0) + + for t := 0; t < 100; t++ { + // ACT + b1 = s.App().TeamKeeper.GetTeamInfo(s.Ctx()).RequiredModuleBalance + s.Commit() + b2 = s.App().TeamKeeper.GetTeamInfo(s.Ctx()).RequiredModuleBalance + + // ASSERT + Expect(b2).To(BeNumerically(">", b1)) + } + }) + + It("community_pool", func() { + // ARRANGE + b1, b2 := int64(0), int64(0) + + for t := 0; t < 100; t++ { + // ACT + b1 = s.App().DistributionKeeper.GetFeePool(s.Ctx()).CommunityPool.AmountOf(globalTypes.Denom).TruncateInt64() + s.Commit() + b2 = s.App().DistributionKeeper.GetFeePool(s.Ctx()).CommunityPool.AmountOf(globalTypes.Denom).TruncateInt64() + + // ASSERT + Expect(b2).To(BeNumerically(">", b1)) + } + }) + + It("distribution", func() { + for t := 0; t < 100; t++ { + // ARRANGE + + // get the team balance and total supply at current block which will + // be used to calculate distribution in BeginBlock of next block + teamBalance := sdk.NewDec(int64(s.GetBalanceFromModule(types.ModuleName))) + totalSupply := sdk.NewDec(s.App().BankKeeper.GetSupply(s.Ctx(), globalTypes.Denom).Amount.Int64()) + + // get current team and validators reward for this block + r1 := s.App().TeamKeeper.GetTeamInfo(s.Ctx()).TotalAuthorityRewards + c1 := uint64(s.App().DistributionKeeper.GetFeePool(s.Ctx()).CommunityPool.AmountOf(globalTypes.Denom).TruncateInt64()) + + // ACT + + // inflation is minted and distributed here + s.Commit() + + // calculate delta for team and community rewards in order to verify distribution + r2 := s.App().TeamKeeper.GetTeamInfo(s.Ctx()).TotalAuthorityRewards + c2 := uint64(s.App().DistributionKeeper.GetFeePool(s.Ctx()).CommunityPool.AmountOf(globalTypes.Denom).TruncateInt64()) + + teamReward := r2 - r1 + communityReward := c2 - c1 + + // get block reward for this block + minter := s.App().MintKeeper.GetMinter(s.Ctx()) + params := s.App().MintKeeper.GetParams(s.Ctx()) + blockProvision := minter.BlockProvision(params) + + // ASSERT + + // calculate if team and community distribution add up to total inflation reward + Expect(teamReward + communityReward).To(Equal(blockProvision.Amount.Uint64())) + + // calculate if distribution share matches with team balance and total supply + Expect(teamBalance.Mul(sdk.NewDec(blockProvision.Amount.Int64())).Quo(totalSupply).TruncateInt64()).To(Equal(int64(teamReward))) + } + }) +}) diff --git a/x/team/keeper/getters_team_vesting_account.go b/x/team/keeper/getters_team_vesting_account.go new file mode 100644 index 00000000..0008a891 --- /dev/null +++ b/x/team/keeper/getters_team_vesting_account.go @@ -0,0 +1,113 @@ +package keeper + +import ( + "encoding/binary" + + "github.com/KYVENetwork/chain/x/team/types" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetAuthority get the 
authority +func (k Keeper) GetAuthority(ctx sdk.Context) (authority types.Authority) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.AuthorityKey + bz := store.Get(byteKey) + + // Authority doesn't exist: no element + if bz == nil { + return + } + + k.cdc.MustUnmarshal(bz, &authority) + return +} + +// SetAuthority set the authority +func (k Keeper) SetAuthority(ctx sdk.Context, authority types.Authority) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.AuthorityKey + b := k.cdc.MustMarshal(&authority) + store.Set(byteKey, b) +} + +// GetTeamVestingAccountCount get the total number of team vesting accounts +func (k Keeper) GetTeamVestingAccountCount(ctx sdk.Context) uint64 { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.TeamVestingAccountCountKey + bz := store.Get(byteKey) + + // Count doesn't exist: no element + if bz == nil { + return 0 + } + + // Parse bytes + return binary.BigEndian.Uint64(bz) +} + +// SetTeamVestingAccountCount set the total number of team vesting accounts +func (k Keeper) SetTeamVestingAccountCount(ctx sdk.Context, count uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), []byte{}) + byteKey := types.TeamVestingAccountCountKey + bz := make([]byte, 8) + binary.BigEndian.PutUint64(bz, count) + store.Set(byteKey, bz) +} + +// AppendTeamVestingAccount appends a team vesting account in the store with a new id and update the count +func (k Keeper) AppendTeamVestingAccount( + ctx sdk.Context, + tva types.TeamVestingAccount, +) uint64 { + // Create the pool + count := k.GetTeamVestingAccountCount(ctx) + + // Set the ID of the appended value + tva.Id = count + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.TeamVestingAccountKey) + appendedValue := k.cdc.MustMarshal(&tva) + store.Set(types.TeamVestingAccountKeyPrefix(tva.Id), appendedValue) + + // Update team vesting account count + k.SetTeamVestingAccountCount(ctx, count+1) + + return count +} + +// GetTeamVestingAccount returns a team vesting account given its address. +func (k Keeper) GetTeamVestingAccount(ctx sdk.Context, id uint64) (tva types.TeamVestingAccount, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.TeamVestingAccountKey) + b := store.Get(types.TeamVestingAccountKeyPrefix(id)) + + if b == nil { + return tva, false + } + + k.cdc.MustUnmarshal(b, &tva) + return tva, true +} + +// GetTeamVestingAccounts returns all team vesting accounts +func (k Keeper) GetTeamVestingAccounts(ctx sdk.Context) (teamVestingAccounts []types.TeamVestingAccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.TeamVestingAccountKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{}) + + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var tva types.TeamVestingAccount + k.cdc.MustUnmarshal(iterator.Value(), &tva) + teamVestingAccounts = append(teamVestingAccounts, tva) + } + + return +} + +// SetTeamVestingAccount sets a specific team vesting account in the store. 
+func (k Keeper) SetTeamVestingAccount(ctx sdk.Context, tva types.TeamVestingAccount) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.TeamVestingAccountKey) + b := k.cdc.MustMarshal(&tva) + store.Set(types.TeamVestingAccountKeyPrefix(tva.Id), b) +} diff --git a/x/team/keeper/grpc_query.go b/x/team/keeper/grpc_query.go new file mode 100644 index 00000000..fc5892e7 --- /dev/null +++ b/x/team/keeper/grpc_query.go @@ -0,0 +1,7 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/x/team/types" +) + +var _ types.QueryServer = Keeper{} diff --git a/x/team/keeper/grpc_query_vesting_status_by_time.go b/x/team/keeper/grpc_query_vesting_status_by_time.go new file mode 100644 index 00000000..58e7cdf7 --- /dev/null +++ b/x/team/keeper/grpc_query_vesting_status_by_time.go @@ -0,0 +1,57 @@ +package keeper + +import ( + "context" + "time" + + "github.com/KYVENetwork/chain/x/team/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) TeamVestingStatusByTime(c context.Context, req *types.QueryTeamVestingStatusByTimeRequest) (*types.QueryTeamVestingStatusByTimeResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + account, found := k.GetTeamVestingAccount(ctx, req.Id) + if !found { + return nil, status.Error(codes.NotFound, "account not found") + } + + vestingStatus := GetVestingStatus(account, req.Time) + + queryVestingStatus := types.QueryVestingStatus{ + TotalVestedAmount: vestingStatus.TotalVestedAmount, + TotalUnlockedAmount: vestingStatus.TotalUnlockedAmount, + CurrentClaimableAmount: vestingStatus.CurrentClaimableAmount, + LockedVestedAmount: vestingStatus.LockedVestedAmount, + RemainingUnvestedAmount: vestingStatus.RemainingUnvestedAmount, + ClaimedAmount: account.UnlockedClaimed, + TotalRewards: account.TotalRewards, + ClaimedRewards: account.RewardsClaimed, + AvailableRewards: account.TotalRewards - account.RewardsClaimed, + } + + vestingPlan := GetVestingPlan(account) + + queryVestingPlan := types.QueryVestingPlan{ + Commencement: time.Unix(int64(account.Commencement), 0).String(), + TokenVestingStart: time.Unix(int64(vestingPlan.TokenVestingStart), 0).String(), + TokenVestingFinished: time.Unix(int64(vestingPlan.TokenVestingFinished), 0).String(), + TokenUnlockStart: time.Unix(int64(vestingPlan.TokenUnlockStart), 0).String(), + TokenUnlockFinished: time.Unix(int64(vestingPlan.TokenUnlockFinished), 0).String(), + Clawback: account.Clawback, + ClawbackAmount: vestingPlan.ClawbackAmount, + MaximumVestingAmount: vestingPlan.MaximumVestingAmount, + } + + return &types.QueryTeamVestingStatusByTimeResponse{ + RequestDate: time.Unix(ctx.BlockTime().Unix(), 0).String(), + Plan: &queryVestingPlan, + Status: &queryVestingStatus, + }, nil +} diff --git a/x/team/keeper/grpc_team_info.go b/x/team/keeper/grpc_team_info.go new file mode 100644 index 00000000..3faa1f4a --- /dev/null +++ b/x/team/keeper/grpc_team_info.go @@ -0,0 +1,21 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/team/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) TeamInfo(c context.Context, req *types.QueryTeamInfoRequest) (*types.QueryTeamInfoResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + + ctx := sdk.UnwrapSDKContext(c) + + return k.GetTeamInfo(ctx), nil +} 
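The keeper above stores the team vesting account count as an 8-byte big-endian value under a fixed key, and a nil read is treated as "no accounts yet". A minimal, standalone sketch of that encoding round trip follows; the helper names here are illustrative only and are not part of the module:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeCount mirrors SetTeamVestingAccountCount: the count is written as 8 big-endian bytes.
func encodeCount(count uint64) []byte {
	bz := make([]byte, 8)
	binary.BigEndian.PutUint64(bz, count)
	return bz
}

// decodeCount mirrors GetTeamVestingAccountCount: a nil value means no element, i.e. a count of zero.
func decodeCount(bz []byte) uint64 {
	if bz == nil {
		return 0
	}
	return binary.BigEndian.Uint64(bz)
}

func main() {
	fmt.Println(decodeCount(nil))            // 0
	fmt.Println(decodeCount(encodeCount(7))) // 7
}

Big-endian keys sort lexicographically in the same order as their numeric values, so prefix iteration over the accounts comes back in id order, assuming the id-derived keys use the same encoding.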
diff --git a/x/team/keeper/grpc_team_vesting_account.go b/x/team/keeper/grpc_team_vesting_account.go new file mode 100644 index 00000000..bfa99f25 --- /dev/null +++ b/x/team/keeper/grpc_team_vesting_account.go @@ -0,0 +1,37 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/x/team/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) TeamVestingAccounts(c context.Context, req *types.QueryTeamVestingAccountsRequest) (*types.QueryTeamVestingAccountsResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + accounts := k.GetTeamVestingAccounts(ctx) + + return &types.QueryTeamVestingAccountsResponse{Accounts: accounts}, nil +} + +func (k Keeper) TeamVestingAccount(c context.Context, req *types.QueryTeamVestingAccountRequest) (*types.QueryTeamVestingAccountResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + account, found := k.GetTeamVestingAccount(ctx, req.Id) + if !found { + return nil, sdkerrors.ErrKeyNotFound + } + + return &types.QueryTeamVestingAccountResponse{Account: account}, nil +} diff --git a/x/team/keeper/grpc_team_vesting_status.go b/x/team/keeper/grpc_team_vesting_status.go new file mode 100644 index 00000000..4b0607f1 --- /dev/null +++ b/x/team/keeper/grpc_team_vesting_status.go @@ -0,0 +1,33 @@ +package keeper + +import ( + "context" + "time" + + "github.com/KYVENetwork/chain/x/team/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func (k Keeper) TeamVestingStatus(c context.Context, req *types.QueryTeamVestingStatusRequest) (*types.QueryTeamVestingStatusResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "invalid request") + } + ctx := sdk.UnwrapSDKContext(c) + + teamVesting, err := k.TeamVestingStatusByTime(ctx, &types.QueryTeamVestingStatusByTimeRequest{ + Id: req.Id, + Time: uint64(ctx.BlockTime().Unix()), + }) + if err != nil { + return nil, err + } + + return &types.QueryTeamVestingStatusResponse{ + RequestDate: time.Unix(ctx.BlockTime().Unix(), 0).String(), + Plan: teamVesting.Plan, + Status: teamVesting.Status, + }, nil +} diff --git a/x/team/keeper/keeper.go b/x/team/keeper/keeper.go new file mode 100644 index 00000000..86bf8874 --- /dev/null +++ b/x/team/keeper/keeper.go @@ -0,0 +1,50 @@ +package keeper + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/codec" + storeTypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" + + // Auth + authKeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // Team + "github.com/KYVENetwork/chain/x/team/types" +) + +type ( + Keeper struct { + cdc codec.BinaryCodec + storeKey storeTypes.StoreKey + + accountKeeper authKeeper.AccountKeeper + bankKeeper bankKeeper.Keeper + } +) + +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storeTypes.StoreKey, + accountKeeper authKeeper.AccountKeeper, + bankKeeper bankKeeper.Keeper, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: storeKey, + + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return 
ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func (k Keeper) StoreKey() storeTypes.StoreKey { + return k.storeKey +} diff --git a/x/team/keeper/keeper_test.go b/x/team/keeper/keeper_test.go new file mode 100644 index 00000000..6dee1b57 --- /dev/null +++ b/x/team/keeper/keeper_test.go @@ -0,0 +1,16 @@ +package keeper_test + +import ( + "fmt" + "testing" + + "github.com/KYVENetwork/chain/x/team/types" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestTeamKeeper(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, fmt.Sprintf("x/%s Keeper Test Suite", types.ModuleName)) +} diff --git a/x/team/keeper/logic_team.go b/x/team/keeper/logic_team.go new file mode 100644 index 00000000..5c1b1396 --- /dev/null +++ b/x/team/keeper/logic_team.go @@ -0,0 +1,161 @@ +package keeper + +import ( + "github.com/KYVENetwork/chain/util" + globalTypes "github.com/KYVENetwork/chain/x/global/types" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetVestingStatus returns all computed values which are dependent on the time +// for the given account +func GetVestingStatus(account types.TeamVestingAccount, time uint64) *types.VestingStatus { + status := types.VestingStatus{} + + // get total allocation + status.TotalVestedAmount = getVestedAmount(account, time) + status.TotalUnlockedAmount = getUnlockedAmount(account, time) + if status.TotalUnlockedAmount > account.UnlockedClaimed { + status.CurrentClaimableAmount = getUnlockedAmount(account, time) - account.UnlockedClaimed + } + + status.LockedVestedAmount = status.TotalVestedAmount - status.TotalUnlockedAmount + status.RemainingUnvestedAmount = getVestingMaxAmount(account) - status.TotalVestedAmount + + return &status +} + +// GetVestingPlan returns all computed static values for a given account +func GetVestingPlan(account types.TeamVestingAccount) *types.VestingPlan { + plan := types.VestingPlan{} + + plan.MaximumVestingAmount = getVestingMaxAmount(account) + plan.ClawbackAmount = account.TotalAllocation - plan.MaximumVestingAmount + + plan.TokenVestingStart = account.Commencement + types.CLIFF_DURATION + plan.TokenVestingFinished = account.Commencement + types.VESTING_DURATION + + plan.TokenUnlockStart = getLockUpReferenceDate(account) + plan.TokenUnlockFinished = getLockUpReferenceDate(account) + types.UNLOCK_DURATION + + return &plan +} + +// GetIssuedTeamAllocation gets the total amount in $KYVE which is issued to all team vesting accounts. 
+// It is equal to the sum of all max vesting amounts, because normally the usage of all +// vesting accounts is the sum of all allocations minus the clawback which getVestingMaxAmount +// already takes into account +func (k Keeper) GetIssuedTeamAllocation(ctx sdk.Context) (used uint64) { + for _, account := range k.GetTeamVestingAccounts(ctx) { + used += getVestingMaxAmount(account) + } + + return +} + +func (k Keeper) GetTeamInfo(ctx sdk.Context) (info *types.QueryTeamInfoResponse) { + info = &types.QueryTeamInfoResponse{} + + info.Authority = types.AUTHORITY_ADDRESS + info.TotalTeamAllocation = types.TEAM_ALLOCATION + + info.IssuedTeamAllocation = k.GetIssuedTeamAllocation(ctx) + info.AvailableTeamAllocation = types.TEAM_ALLOCATION - info.IssuedTeamAllocation + + authority := k.GetAuthority(ctx) + info.TotalAuthorityRewards = authority.TotalRewards + info.ClaimedAuthorityRewards = authority.RewardsClaimed + info.AvailableAuthorityRewards = authority.TotalRewards - authority.RewardsClaimed + + info.RequiredModuleBalance = types.TEAM_ALLOCATION + info.AvailableAuthorityRewards + + for _, account := range k.GetTeamVestingAccounts(ctx) { + info.TotalAccountRewards += account.TotalRewards + info.ClaimedAccountRewards += account.RewardsClaimed + info.AvailableAccountRewards += account.TotalRewards - account.RewardsClaimed + + info.RequiredModuleBalance += account.TotalRewards - account.RewardsClaimed + info.RequiredModuleBalance -= account.UnlockedClaimed + } + + coins := k.bankKeeper.GetBalance(ctx, k.accountKeeper.GetModuleAddress(types.ModuleName), globalTypes.Denom) + info.TeamModuleBalance = uint64(coins.Amount.Int64()) + + return +} + +// getVestedAmount returns the total amount of $KYVE that has vested until the given time for the given user. +// The function is well-defined for all values of t +func getVestedAmount(account types.TeamVestingAccount, time uint64) uint64 { + // the account vesting duration is the time in seconds an account is already vesting + accountVestingDuration := uint64(0) + + // if the specified time is after the commencement date the vesting duration is longer than zero + if time > account.Commencement { + accountVestingDuration = time - account.Commencement + } + + // if a clawback time is defined and if it is before the specified time the vesting duration only goes + // until the clawback time + if account.Clawback > 0 && account.Clawback < time { + accountVestingDuration = account.Clawback - account.Commencement + } + + // if account is vesting less than the vesting cliff the vested amount is zero + if accountVestingDuration < types.CLIFF_DURATION { + return 0 + } + + // if user is vesting less than the vesting duration the vested amount is linear to the membership time + if accountVestingDuration < types.VESTING_DURATION { + vested := sdk.NewDec(int64(account.TotalAllocation)). + Mul(sdk.NewDec(int64(accountVestingDuration))). 
+ Quo(sdk.NewDec(int64(types.VESTING_DURATION))) + + return uint64(vested.TruncateInt64()) + } + + // if user is vesting longer than the vesting duration the entire allocation has vested + return account.TotalAllocation +} + +// getVestingMaxAmount gets the maximum amount an account can possibly vest +func getVestingMaxAmount(account types.TeamVestingAccount) uint64 { + // in order to get the maximum possible vesting amount we add the total vesting duration to the + // commencement date as the specified time + return getVestedAmount(account, account.Commencement+types.VESTING_DURATION) +} + +// getLockUpReferenceDate gets the unix time the unlocking starts for an account +func getLockUpReferenceDate(account types.TeamVestingAccount) uint64 { + // the unlocking starts exactly 1 year after the commencement or TGE, whatever the latter is + return util.MaxUInt64(account.Commencement, types.TGE) + types.CLIFF_DURATION +} + +// getUnlockedAmount returns total amount of $KYVE that has unlocked until the given time for the given user. +// The function is well-defined for all values of t +func getUnlockedAmount(account types.TeamVestingAccount, time uint64) uint64 { + // get the unix time the unlocking for an account starts + timeUnlock := getLockUpReferenceDate(account) + + // if the specified time is before the lockup reference data the unlocked amount is zero + if time < timeUnlock { + return 0 + } + // => time - timeUnlock >= 0 + + if time-timeUnlock < types.UNLOCK_DURATION { + // get the total vested amount based on specified time + vested := getVestedAmount(account, time) + + // calculate the unlocked amount linearly based on time + unlocked := sdk.NewDec(int64(vested)). + Mul(sdk.NewDec(int64(time - timeUnlock))). + Quo(sdk.NewDec(int64(types.UNLOCK_DURATION))) + + return uint64(unlocked.TruncateInt64()) + } + + // if specified time comes after the unlock duration the full maximum vesting amount is unlocked + return getVestingMaxAmount(account) +} diff --git a/x/team/keeper/logic_team_test.go b/x/team/keeper/logic_team_test.go new file mode 100644 index 00000000..5554d862 --- /dev/null +++ b/x/team/keeper/logic_team_test.go @@ -0,0 +1,402 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - logic_team.go + +* leave_minus_join_lt_1y +* leave_minus_join_lt_3y_and_tge_lt_join +* leave_minus_join_lt_3y_and_tge_gt_join +* leave_minus_join_gt_3y_and_tge_gt_join +* leave_minus_join_gt_3y_and_tge_lt_join +* no_clawback_tjoin_lt_tge +* no_clawback_tjoin_gt_tge +* leave_minus_join_gt_3y_and_tge_eq_join + +*/ + +func createTeamAccount(allocation, commencement, duration uint64) types.TeamVestingAccount { + return types.TeamVestingAccount{ + Id: 0, + TotalAllocation: allocation, + Commencement: commencement, + Clawback: commencement + duration, + UnlockedClaimed: 0, + LastClaimedTime: 0, + } +} + +var _ = Describe("logic_team_test.go", Ordered, func() { + const YEAR = uint64(60 * 60 * 24 * 365) + const MONTH = uint64(5 * 60 * 24 * 365) + const ALLOCATION = 1_000_000 * i.KYVE + + It("leave_minus_join_lt_1y", func() { + // ARRANGE + accountB3y := createTeamAccount(1_000_000*i.KYVE, types.TGE-3*YEAR, YEAR-1) + accountB2y := createTeamAccount(1_000_000*i.KYVE, types.TGE-2*YEAR, YEAR-1) + accountB1y := createTeamAccount(1_000_000*i.KYVE, types.TGE-1*YEAR, YEAR-1) + accountB0y := createTeamAccount(1_000_000*i.KYVE, types.TGE-0*YEAR, YEAR-1) + accountA1y := createTeamAccount(1_000_000*i.KYVE, types.TGE+1*YEAR, YEAR-1) + + // ASSERT + status := teamKeeper.GetVestingStatus(accountB3y, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.RemainingUnvestedAmount)) + + status = teamKeeper.GetVestingStatus(accountB2y, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.RemainingUnvestedAmount)) + + status = teamKeeper.GetVestingStatus(accountB1y, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.RemainingUnvestedAmount)) + + status = teamKeeper.GetVestingStatus(accountB0y, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.RemainingUnvestedAmount)) + + status = teamKeeper.GetVestingStatus(accountA1y, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.RemainingUnvestedAmount)) + }) + + It("leave_minus_join_lt_3y_and_tge_lt_join", func() { + // ARRANGE + tjoin := types.TGE + 6*MONTH + account := createTeamAccount(ALLOCATION, tjoin, 30*MONTH) + // Maximum is ALLOCATION*30/36 + + // ASSERT + // t < tjoin => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 
3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*30/36 - ALLOCATION/3).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_leave => max amount (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t > tjoin + Cliff + Unlock => everything is unlocked + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(statusJCU.TotalUnlockedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(statusJCU.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(statusJCU.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + }) + + It("leave_minus_join_lt_3y_and_tge_gt_join", func() { + // ARRANGE + tjoin := types.TGE - 6*MONTH + account := createTeamAccount(ALLOCATION, tjoin, 30*MONTH) + // Maximum is ALLOCATION*30/36 + + // ASSERT + // t < tjoin + 1 YR => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*30/36 - ALLOCATION/3).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_leave => max amount (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION * 30 / 36).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.CurrentClaimableAmount)) + Expect(statusJCU.TotalVestedAmount - 
statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + + // t = TGE + Cliff + Unlock => everything is unlocked + statusAT := teamKeeper.GetVestingStatus(account, types.TGE+3*YEAR) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAT.TotalVestedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAT.TotalUnlockedAmount)) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAT.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(statusAT.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusAT.RemainingUnvestedAmount)) + }) + + It("leave_minus_join_gt_3y_and_tge_gt_join", func() { + // ARRANGE + tjoin := types.TGE - 6*MONTH + account := createTeamAccount(ALLOCATION, tjoin, 36*MONTH) + + // ASSERT + // t < tjoin + 1 YR => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested, t < t_unlock + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*2/3 + 1).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_join * 2.5 Years => max amount (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(ALLOCATION*1/6 + 1).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.CurrentClaimableAmount)) + Expect(statusJCU.TotalVestedAmount - statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + + // t = TGE + Cliff + Unlock => everything is unlocked + statusAT := teamKeeper.GetVestingStatus(account, types.TGE+3*YEAR) + Expect(ALLOCATION).To(Equal(statusAT.TotalVestedAmount)) + Expect(ALLOCATION).To(Equal(statusAT.TotalUnlockedAmount)) + Expect(ALLOCATION).To(Equal(statusAT.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(statusAT.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusAT.RemainingUnvestedAmount)) + }) + + It("leave_minus_join_gt_3y_and_tge_lt_join", func() { + // ARRANGE + tjoin := types.TGE + 6*MONTH + account := createTeamAccount(ALLOCATION, tjoin, 36*MONTH) + + // ASSERT + // t < tjoin + 1 YR => everything is unvested + status := 
teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested, t < t_unlock + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*2/3 + 1).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_join * 2.5 Years => (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(ALLOCATION*1/6 + 1).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.CurrentClaimableAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.LockedVestedAmount).To(Equal(uint64(0))) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + }) + + It("no_clawback_t_join_lt_tge", func() { + // ARRANGE + tjoin := types.TGE - 6*MONTH + account := types.TeamVestingAccount{ + TotalAllocation: ALLOCATION, + Commencement: tjoin, + } + + // ASSERT + // t < tjoin + 1 YR => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested, t < t_unlock + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*2/3 + 1).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_join * 2.5 Years => max amount (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(ALLOCATION*1/6 + 1).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but 
unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusJCU.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION*30/36)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.CurrentClaimableAmount)) + Expect(statusJCU.TotalVestedAmount - statusJCU.TotalUnlockedAmount).To(Equal(statusJCU.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + + // t = TGE + Cliff + Unlock => everything is unlocked + statusAT := teamKeeper.GetVestingStatus(account, types.TGE+3*YEAR) + Expect(ALLOCATION).To(Equal(statusAT.TotalVestedAmount)) + Expect(ALLOCATION).To(Equal(statusAT.TotalUnlockedAmount)) + Expect(ALLOCATION).To(Equal(statusAT.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(statusAT.LockedVestedAmount)) + Expect(uint64(0)).To(Equal(statusAT.RemainingUnvestedAmount)) + }) + + It("no_clawback_t_join_gt_tge", func() { + // ARRANGE + tjoin := types.TGE + 6*MONTH + account := types.TeamVestingAccount{ + TotalAllocation: ALLOCATION, + Commencement: tjoin, + } + + // ASSERT + // t < tjoin + 1 YR => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested, t < t_unlock + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*2/3 + 1).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_join * 2.5 Years => (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(ALLOCATION*1/6 + 1).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.CurrentClaimableAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.LockedVestedAmount).To(Equal(uint64(0))) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + }) + + It("leave_minus_join_gt_3y_and_tge_eq_join", func() { + // ARRANGE + tjoin := types.TGE + account := createTeamAccount(ALLOCATION, tjoin, 36*MONTH) + + // ASSERT + // t == tjoin => everything is unvested + status := teamKeeper.GetVestingStatus(account, types.TGE) + Expect(uint64(0)).To(Equal(status.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status.CurrentClaimableAmount)) + 
Expect(uint64(0)).To(Equal(status.LockedVestedAmount)) + Expect(ALLOCATION).To(Equal(status.RemainingUnvestedAmount)) + + // t = tjoin + 1 Year => 1/3 is vested, t < t_unlock + status1Y := teamKeeper.GetVestingStatus(account, tjoin+YEAR) + Expect(ALLOCATION / 3).To(Equal(status1Y.TotalVestedAmount)) + Expect(uint64(0)).To(Equal(status1Y.TotalUnlockedAmount)) + Expect(uint64(0)).To(Equal(status1Y.CurrentClaimableAmount)) + Expect(ALLOCATION / 3).To(Equal(status1Y.LockedVestedAmount)) + Expect(ALLOCATION*2/3 + 1).To(Equal(status1Y.RemainingUnvestedAmount)) + + // t = t_join * 2.5 Years => (5/6) is vested + statusAL := teamKeeper.GetVestingStatus(account, tjoin+30*MONTH) + Expect(ALLOCATION * 30 / 36).To(Equal(statusAL.TotalVestedAmount)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically(">", 0)) + Expect(statusAL.TotalUnlockedAmount).To(BeNumerically("<=", ALLOCATION)) + Expect(statusAL.TotalUnlockedAmount).To(Equal(statusAL.CurrentClaimableAmount)) + Expect(statusAL.TotalVestedAmount - statusAL.TotalUnlockedAmount).To(Equal(statusAL.LockedVestedAmount)) + Expect(ALLOCATION*1/6 + 1).To(Equal(statusAL.RemainingUnvestedAmount)) + + // t = tjoin + Cliff + Unlock => everything is vested but unlock still ongoing + statusJCU := teamKeeper.GetVestingStatus(account, tjoin+3*YEAR) + Expect(ALLOCATION).To(Equal(statusJCU.TotalVestedAmount)) + Expect(statusJCU.TotalUnlockedAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.CurrentClaimableAmount).To(Equal(ALLOCATION)) + Expect(statusJCU.LockedVestedAmount).To(Equal(uint64(0))) + Expect(uint64(0)).To(Equal(statusJCU.RemainingUnvestedAmount)) + }) +}) + +//func debugPrintStatus(status *types.VestingStatus) { +// fmt.Printf("TotalVestedAmount: %d\nTotalUnlocked: %d\nCurrentClaimable: %d\nLocked: %d\nRemainingUnvested: %d\n", +// status.TotalVestedAmount, status.TotalUnlockedAmount, status.CurrentClaimableAmount, status.LockedVestedAmount, status.RemainingUnvestedAmount) +//} diff --git a/x/team/keeper/msg_server.go b/x/team/keeper/msg_server.go new file mode 100644 index 00000000..597e2219 --- /dev/null +++ b/x/team/keeper/msg_server.go @@ -0,0 +1,15 @@ +package keeper + +import "github.com/KYVENetwork/chain/x/team/types" + +type msgServer struct { + Keeper +} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. 
+func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{Keeper: keeper} +} + +var _ types.MsgServer = msgServer{} diff --git a/x/team/keeper/msg_server_claim_account_rewards.go b/x/team/keeper/msg_server_claim_account_rewards.go new file mode 100644 index 00000000..dec85309 --- /dev/null +++ b/x/team/keeper/msg_server_claim_account_rewards.go @@ -0,0 +1,47 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + + "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) ClaimAccountRewards(goCtx context.Context, msg *types.MsgClaimAccountRewards) (*types.MsgClaimAccountRewardsResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if types.AUTHORITY_ADDRESS != msg.Authority { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidAuthority.Error(), types.AUTHORITY_ADDRESS, msg.Authority) + } + + account, found := k.GetTeamVestingAccount(ctx, msg.Id) + if !found { + return nil, sdkErrors.ErrNotFound + } + + // check if account has available inflation rewards which can be claimed + if account.TotalRewards-account.RewardsClaimed < msg.Amount { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrClaimAmountTooHigh.Error(), account.TotalRewards-account.RewardsClaimed, msg.Amount) + } + + // send inflation rewards to recipient + if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, msg.Recipient, msg.Amount); err != nil { + return nil, err + } + + // increase claimed inflation rewards + account.RewardsClaimed += msg.Amount + k.SetTeamVestingAccount(ctx, account) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventClaimInflationRewards{ + Id: account.Id, + Amount: msg.Amount, + Recipient: msg.Recipient, + }) + + return &types.MsgClaimAccountRewardsResponse{}, nil +} diff --git a/x/team/keeper/msg_server_claim_account_rewards_test.go b/x/team/keeper/msg_server_claim_account_rewards_test.go new file mode 100644 index 00000000..589cd3b2 --- /dev/null +++ b/x/team/keeper/msg_server_claim_account_rewards_test.go @@ -0,0 +1,180 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_claim_account_rewards.go + +* invalid_authority +* claim_more_rewards_than_available +* partially_claim_rewards_once +* claim_rewards_with_3_months_interval + +*/ + +var _ = Describe("msg_server_claim_account_rewards.go", Ordered, func() { + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + // init new clean chain at TGE time + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("invalid_authority", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + s.CommitAfterSeconds(2 * YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + + Expect(tva.TotalRewards).To(BeNumerically(">", uint64(0))) + Expect(tva.RewardsClaimed).To(BeZero()) + + // ACT + _, err := s.RunTx(&types.MsgClaimAccountRewards{ + Authority: i.ALICE, + Id: 0, + Amount: tva.TotalRewards, + Recipient: i.BOB, + }) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("claim_more_rewards_than_available", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + s.CommitAfterSeconds(2 * YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + + Expect(tva.TotalRewards).To(BeNumerically(">", uint64(0))) + Expect(tva.RewardsClaimed).To(BeZero()) + + // ACT + _, err := s.RunTx(&types.MsgClaimAccountRewards{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: tva.TotalRewards + 1, + Recipient: i.ALICE, + }) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("partially_claim_rewards_once", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + s.CommitAfterSeconds(2 * YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + + Expect(tva.TotalRewards).To(BeNumerically(">", uint64(0))) + Expect(tva.RewardsClaimed).To(BeZero()) + s.PerformValidityChecks() + + // ACT + s.RunTxTeamSuccess(&types.MsgClaimAccountRewards{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 100, + Recipient: i.ALICE, + }) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000*i.KYVE + 100)) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.TotalRewards).To(BeNumerically(">", uint64(0))) + Expect(tva.RewardsClaimed).To(Equal(uint64(100))) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.ClaimedAccountRewards).To(Equal(uint64(100))) + Expect(info.AvailableAccountRewards).To(Equal(info.TotalAccountRewards - uint64(100))) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - uint64(100))) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) + + It("claim_rewards_with_3_months_interval", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + totalClaimed := uint64(0) + s.PerformValidityChecks() + + // ACT + for m := 1; m <= 16; m++ { + s.CommitAfterSeconds(3 * MONTH) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + 
status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + rewards := tva.TotalRewards - tva.RewardsClaimed + + // account should only receive inflation rewards if it has vested $KYVE + if m < 4 { + Expect(rewards).To(BeZero()) + } else if m <= 12 { + Expect(rewards).To(BeNumerically(">", uint64(0))) + } else { + Expect(rewards).To(BeZero()) + } + + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: status.CurrentClaimableAmount, + Recipient: i.BOB, + }) + + s.RunTxTeamSuccess(&types.MsgClaimAccountRewards{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: rewards, + Recipient: i.ALICE, + }) + + totalClaimed += rewards + } + + // ASSERT + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000*i.KYVE + totalClaimed)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.ClaimedAccountRewards).To(Equal(totalClaimed)) + Expect(info.AvailableAccountRewards).To(Equal(info.TotalAccountRewards - totalClaimed)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - totalClaimed - 1_000_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) +}) diff --git a/x/team/keeper/msg_server_claim_authority_rewards.go b/x/team/keeper/msg_server_claim_authority_rewards.go new file mode 100644 index 00000000..0ee74ff4 --- /dev/null +++ b/x/team/keeper/msg_server_claim_authority_rewards.go @@ -0,0 +1,43 @@ +package keeper + +import ( + "context" + + "github.com/KYVENetwork/chain/util" + + "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) ClaimAuthorityRewards(goCtx context.Context, msg *types.MsgClaimAuthorityRewards) (*types.MsgClaimAuthorityRewardsResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if types.AUTHORITY_ADDRESS != msg.Authority { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidAuthority.Error(), types.AUTHORITY_ADDRESS, msg.Authority) + } + + authority := k.GetAuthority(ctx) + + // check if authority has enough available rewards to claim + if authority.TotalRewards-authority.RewardsClaimed < msg.Amount { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrClaimAmountTooHigh.Error(), authority.TotalRewards-authority.RewardsClaimed, msg.Amount) + } + + // send authority inflation rewards to recipient + if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, msg.Recipient, msg.Amount); err != nil { + return nil, err + } + + // increase claimed inflation rewards + authority.RewardsClaimed += msg.Amount + k.SetAuthority(ctx, authority) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventClaimAuthorityRewards{ + Amount: msg.Amount, + Recipient: msg.Recipient, + }) + + return &types.MsgClaimAuthorityRewardsResponse{}, nil +} diff --git a/x/team/keeper/msg_server_claim_authority_rewards_test.go b/x/team/keeper/msg_server_claim_authority_rewards_test.go new file mode 100644 index 00000000..cf9fa001 --- /dev/null +++ b/x/team/keeper/msg_server_claim_authority_rewards_test.go @@ -0,0 +1,108 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_claim_authority_rewards.go + +* invalid_authority +* claim_more_rewards_than_available +* partially_claim_rewards_once +* claim_rewards_with_3_months_interval + +*/ + +var _ = Describe("msg_server_claim_authority_rewards.go", Ordered, func() { + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + // init new clean chain at TGE time + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("invalid_authority", func() { + // ARRANGE + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + + // ACT + _, err := s.RunTx(&types.MsgClaimAuthorityRewards{ + Authority: i.ALICE, + Amount: info.AvailableAuthorityRewards, + Recipient: i.BOB, + }) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("claim_more_rewards_than_available", func() { + // ARRANGE + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + + // ACT + _, err := s.RunTx(&types.MsgClaimAuthorityRewards{ + Authority: types.AUTHORITY_ADDRESS, + Amount: info.AvailableAuthorityRewards + 1, + Recipient: i.ALICE, + }) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("partially_claim_rewards_once", func() { + // ACT + s.RunTxTeamSuccess(&types.MsgClaimAuthorityRewards{ + Authority: types.AUTHORITY_ADDRESS, + Amount: 100, + Recipient: i.ALICE, + }) + + // ASSERT + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000*i.KYVE + 100)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.ClaimedAuthorityRewards).To(Equal(uint64(100))) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards - uint64(100))) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards - uint64(100))) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) + + It("claim_rewards_with_3_months_interval", func() { + // ARRANGE + totalClaimed := uint64(0) + + // ACT + for m := 1; m <= 12; m++ { + s.CommitAfterSeconds(3 * MONTH) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + + s.RunTxTeamSuccess(&types.MsgClaimAuthorityRewards{ + Authority: types.AUTHORITY_ADDRESS, + Amount: info.AvailableAuthorityRewards, + Recipient: i.ALICE, + }) + + totalClaimed += info.AvailableAuthorityRewards + } + + // ASSERT + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + + Expect(info.ClaimedAuthorityRewards).To(Equal(totalClaimed)) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards - totalClaimed)) + + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000*i.KYVE + totalClaimed)) + }) +}) diff --git a/x/team/keeper/msg_server_claim_unlocked.go b/x/team/keeper/msg_server_claim_unlocked.go new file mode 100644 index 00000000..5f12ea49 --- /dev/null +++ b/x/team/keeper/msg_server_claim_unlocked.go @@ -0,0 +1,51 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/util" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) ClaimUnlocked(goCtx context.Context, msg *types.MsgClaimUnlocked) (*types.MsgClaimUnlockedResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if types.AUTHORITY_ADDRESS != msg.Authority { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidAuthority.Error(), types.AUTHORITY_ADDRESS, msg.Authority) + } + + account, found := k.GetTeamVestingAccount(ctx, msg.Id) + if !found { + return nil, sdkErrors.ErrNotFound + } + + // get current 
claimable amount + currentProgress := GetVestingStatus(account, uint64(ctx.BlockTime().Unix())) + + // throw error if the requested claim amount is bigger than the available unlocked amount + if msg.Amount > currentProgress.CurrentClaimableAmount { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrClaimAmountTooHigh.Error(), msg.Amount, currentProgress.CurrentClaimableAmount) + } + + // Transfer claim amount from this module to recipient. + if err := util.TransferFromModuleToAddress(k.bankKeeper, ctx, types.ModuleName, msg.Recipient, msg.Amount); err != nil { + return nil, err + } + + // update claimed amount of unlocked $KYVE + account.UnlockedClaimed += msg.Amount + account.LastClaimedTime = uint64(ctx.BlockTime().Unix()) + + k.SetTeamVestingAccount(ctx, account) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventClaimedUnlocked{ + Id: account.Id, + Amount: msg.Amount, + Recipient: msg.Recipient, + }) + + return &types.MsgClaimUnlockedResponse{}, nil +} diff --git a/x/team/keeper/msg_server_claim_unlocked_test.go b/x/team/keeper/msg_server_claim_unlocked_test.go new file mode 100644 index 00000000..41547e53 --- /dev/null +++ b/x/team/keeper/msg_server_claim_unlocked_test.go @@ -0,0 +1,223 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_claim_unlocked.go + +* invalid_authority +* claim_zero_unlocked +* partially_claim_unlocked_once +* claim_entire_allocation_with_3_months_interval +* claim_twice_in_same_block + +*/ + +func appendTeamVestingAccount(s *i.KeeperTestSuite, commencement, clawback uint64) { + s.App().TeamKeeper.AppendTeamVestingAccount(s.Ctx(), types.TeamVestingAccount{ + TotalAllocation: 1_000_000 * i.KYVE, + Commencement: commencement, + Clawback: clawback, + UnlockedClaimed: 0, + LastClaimedTime: 0, + }) +} + +const ( + YEAR = uint64(60 * 60 * 24 * 365) + MONTH = uint64(5 * 60 * 24 * 365) + ALLOCATION = 1_000_000 * i.KYVE +) + +var _ = Describe("msg_server_claim_unlocked.go", Ordered, func() { + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + // init new clean chain at TGE time + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("invalid_authority", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - YEAR, + }) + + s.CommitAfterSeconds(3 * YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + s.PerformValidityChecks() + + // ACT + _, err := s.RunTx(&types.MsgClaimUnlocked{ + Authority: i.BOB, + Id: 0, + Amount: 1_000_000 * i.KYVE, + Recipient: i.ALICE, + }) + + // ASSERT + Expect(err).To(HaveOccurred()) + }) + + It("claim_zero_unlocked", func() { + // ARRANGE + appendTeamVestingAccount(s, types.TGE-11*MONTH, 0) + + // ASSERT + s.RunTxTeamError(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 100, + Recipient: i.ALICE, + }) + + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 0, + Recipient: i.ALICE, + }) + }) + + It("partially_claim_unlocked_once", func() 
{ + // ARRANGE + appendTeamVestingAccount(s, types.TGE, 0) + + s.CommitAfterSeconds(3 * YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + s.PerformValidityChecks() + + // ACT + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 100_000 * i.KYVE, + Recipient: i.ALICE, + }) + + // ASSERT + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + Expect(status.CurrentClaimableAmount).To(Equal(900_000 * i.KYVE)) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(101_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 100_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) + + It("claim_entire_allocation_with_3_months_interval", func() { + // ARRANGE + appendTeamVestingAccount(s, types.TGE, 0) + + s.CommitAfterSeconds(YEAR) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + Expect(status.CurrentClaimableAmount).To(BeZero()) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + s.PerformValidityChecks() + + // ACT + for m := 1; m <= 8; m++ { + s.CommitAfterSeconds(3 * MONTH) + + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: status.CurrentClaimableAmount, + Recipient: i.ALICE, + }) + } + + // ASSERT + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + Expect(status.CurrentClaimableAmount).To(BeZero()) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_001_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 1_000_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) + + It("claim_twice_in_same_block", func() { + // ARRANGE + appendTeamVestingAccount(s, types.TGE-YEAR, 0) + + s.CommitAfterSeconds(3 * YEAR) + + // ACT 
+ s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: ALLOCATION / 2, + Recipient: i.ALICE, + }) + + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: ALLOCATION / 2, + Recipient: i.ALICE, + }) + + s.RunTxTeamError(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 1, + Recipient: i.ALICE, + }) + + // ASSERT + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + Expect(status.CurrentClaimableAmount).To(BeZero()) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(1_001_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 1_000_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + }) +}) diff --git a/x/team/keeper/msg_server_clawback.go b/x/team/keeper/msg_server_clawback.go new file mode 100644 index 00000000..8016b839 --- /dev/null +++ b/x/team/keeper/msg_server_clawback.go @@ -0,0 +1,44 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) Clawback(goCtx context.Context, msg *types.MsgClawback) (*types.MsgClawbackResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if types.AUTHORITY_ADDRESS != msg.Authority { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidAuthority.Error(), types.AUTHORITY_ADDRESS, msg.Authority) + } + + account, found := k.GetTeamVestingAccount(ctx, msg.Id) + if !found { + return nil, sdkErrors.ErrNotFound + } + + // can not clawback before commencement + if msg.Clawback > 0 && msg.Clawback < account.Commencement { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidClawbackDate.Error()) + } + + // can not clawback before last claim time because claimed $KYVE can not be returned to team module + if msg.Clawback > 0 && msg.Clawback < account.LastClaimedTime { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidClawbackDate.Error()) + } + + account.Clawback = msg.Clawback + k.SetTeamVestingAccount(ctx, account) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventClawback{ + Id: account.Id, + Clawback: msg.Clawback, + Amount: account.TotalAllocation - getVestingMaxAmount(account), + }) + + return &types.MsgClawbackResponse{}, nil +} diff --git a/x/team/keeper/msg_server_clawback_test.go b/x/team/keeper/msg_server_clawback_test.go new file mode 100644 index 00000000..5ad23f4d --- /dev/null +++ b/x/team/keeper/msg_server_clawback_test.go @@ -0,0 +1,434 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + teamKeeper "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_clawback.go + +* try_with_invalid_authority +* try_to_apply_clawback_before_tjoin +* try_to_apply_clawback_before_last_claim_time +* apply_clawback +* clawback_multiple_times +* clawback_multiple_accounts + +*/ + +var _ = Describe("msg_server_clawback.go", Ordered, func() { + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + // init new clean chain + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("try_with_invalid_authority", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - YEAR, + }) + + s.CommitAfterSeconds(1 * YEAR) + s.CommitAfterSeconds(1 * MONTH) // One month of unlock + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(uint64(694_444_444_444_444))) + Expect(status.TotalUnlockedAmount).To(Equal(uint64(28_935_185_185_185))) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(305_555_555_555_556))) + Expect(status.LockedVestedAmount).To(Equal(uint64(665_509_259_259_259))) + Expect(status.CurrentClaimableAmount).To(Equal(uint64(28_935_185_185_185))) + + // ACT + s.RunTxTeamError(&types.MsgClawback{ + Authority: i.ALICE, + Id: 0, + Clawback: uint64(s.Ctx().BlockTime().Unix()), + }) + + // ASSERT + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(uint64(0))) + }) + + It("try_to_apply_clawback_before_tjoin", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - YEAR, + }) + + s.CommitAfterSeconds(1 * YEAR) + s.CommitAfterSeconds(1 * MONTH) // One month of unlock + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(uint64(694_444_444_444_444))) + Expect(status.TotalUnlockedAmount).To(Equal(uint64(28_935_185_185_185))) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(305_555_555_555_556))) + Expect(status.LockedVestedAmount).To(Equal(uint64(665_509_259_259_259))) + Expect(status.CurrentClaimableAmount).To(Equal(uint64(28_935_185_185_185))) + s.PerformValidityChecks() + + // ACT + s.RunTxTeamError(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: types.TGE - YEAR - MONTH, // one month before tjoin + }) + + // ASSERT + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(BeZero()) + }) + + It("try_to_apply_clawback_before_last_claim_time", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - YEAR, + }) + + s.CommitAfterSeconds(1 * YEAR) + s.CommitAfterSeconds(1 * MONTH) // One month of unlock + acc, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(acc.UnlockedClaimed).To(Equal(uint64(0))) + 
Expect(acc.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(acc, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(uint64(694_444_444_444_444))) + Expect(status.TotalUnlockedAmount).To(Equal(uint64(28_935_185_185_185))) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(305_555_555_555_556))) + Expect(status.LockedVestedAmount).To(Equal(uint64(665_509_259_259_259))) + Expect(status.CurrentClaimableAmount).To(Equal(uint64(28_935_185_185_185))) + s.PerformValidityChecks() + + // ACT + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 10_935_185_185_185, + Recipient: i.ALICE, + }) + + s.RunTxTeamError(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: types.TGE, // before unlock claim + }) + + // ASSERT + acc, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(acc.Clawback).To(BeZero()) + Expect(acc.UnlockedClaimed).To(Equal(uint64(10_935_185_185_185))) + + status = teamKeeper.GetVestingStatus(acc, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(uint64(694_444_444_444_444))) + Expect(status.TotalUnlockedAmount).To(Equal(uint64(28_935_185_185_185))) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(305_555_555_555_556))) + Expect(status.LockedVestedAmount).To(Equal(uint64(665_509_259_259_259))) + Expect(status.CurrentClaimableAmount).To(Equal(uint64(18_000_000_000_000))) + }) + + It("apply_clawback", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - YEAR, + }) + + s.CommitAfterSeconds(1 * YEAR) + s.CommitAfterSeconds(1 * MONTH) // One month of unlock + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(uint64(694_444_444_444_444))) + Expect(status.TotalUnlockedAmount).To(Equal(uint64(28_935_185_185_185))) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(305_555_555_555_556))) + Expect(status.LockedVestedAmount).To(Equal(uint64(665_509_259_259_259))) + Expect(status.CurrentClaimableAmount).To(Equal(uint64(28_935_185_185_185))) + s.PerformValidityChecks() + + s.RunTxTeamSuccess(&types.MsgClaimUnlocked{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Amount: 20_000 * i.KYVE, + Recipient: i.ALICE, + }) + Expect(s.GetBalanceFromAddress(i.ALICE)).To(Equal(21_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.Authority).To(Equal(types.AUTHORITY_ADDRESS)) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(Equal(1_000_000 * i.KYVE)) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAuthorityRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAuthorityRewards).To(BeZero()) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAccountRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAccountRewards).To(BeZero()) + 
Expect(info.AvailableAccountRewards).To(Equal(info.TotalAccountRewards)) + + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 20_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + s.PerformValidityChecks() + + // ACT + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: uint64(s.Ctx().BlockTime().Unix()), + }) + + // ASSERT + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.Authority).To(Equal(types.AUTHORITY_ADDRESS)) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(Equal(uint64(694_444_444_444_444))) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - uint64(694_444_444_444_444))) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAuthorityRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAuthorityRewards).To(BeZero()) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAccountRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAccountRewards).To(BeZero()) + Expect(info.AvailableAccountRewards).To(Equal(info.TotalAccountRewards)) + + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 20_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status2 := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status2.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(tva.Clawback).To(Equal(uint64(s.Ctx().BlockTime().Unix()))) + + s.CommitAfterSeconds(2 * YEAR) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + status3 := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.Authority).To(Equal(types.AUTHORITY_ADDRESS)) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(Equal(uint64(694_444_444_444_444))) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - uint64(694_444_444_444_444))) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAuthorityRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAuthorityRewards).To(BeZero()) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAccountRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAccountRewards).To(BeZero()) + Expect(info.AvailableAccountRewards).To(Equal(info.TotalAccountRewards)) + + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards + info.TotalAccountRewards - 20_000*i.KYVE)) + Expect(info.TeamModuleBalance).To(Equal(info.RequiredModuleBalance)) + + Expect(status3.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(status3.LockedVestedAmount).To(Equal(uint64(0))) + Expect(status3.CurrentClaimableAmount).To(Equal(status3.TotalUnlockedAmount - 20_000*i.KYVE)) + Expect(status3.LockedVestedAmount).To(Equal(uint64(0))) + Expect(status3.TotalVestedAmount).To(Equal(status.TotalVestedAmount)) + }) + + It("clawback_multiple_times", func() { + // ARRANGE 
+ s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + s.CommitAfterSeconds(3 * YEAR) // vesting is done and nothing has claimed yet + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(uint64(0))) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(status.LockedVestedAmount).To(Equal(uint64(0))) + Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + s.PerformValidityChecks() + + // ACT + // clawback before cliff + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: types.TGE + MONTH, + }) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(types.TGE + MONTH)) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(BeZero()) + Expect(status.TotalUnlockedAmount).To(BeZero()) + Expect(status.RemainingUnvestedAmount).To(BeZero()) + Expect(status.LockedVestedAmount).To(BeZero()) + Expect(status.CurrentClaimableAmount).To(BeZero()) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + + // clawback right in the middle + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: types.TGE + YEAR + 6*MONTH, + }) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(types.TGE + YEAR + 6*MONTH)) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(500_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(500_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(BeZero()) + Expect(status.LockedVestedAmount).To(BeZero()) + Expect(status.CurrentClaimableAmount).To(Equal(500_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 500_000*i.KYVE)) + + // clawback after vesting period + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: types.TGE + 3*YEAR + 6*MONTH, + }) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(types.TGE + 3*YEAR + 6*MONTH)) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(status.LockedVestedAmount).To(Equal(uint64(0))) + 
Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + + // reset clawback + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 0, + Clawback: 0, + }) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(tva.Clawback).To(Equal(uint64(0))) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(status.LockedVestedAmount).To(Equal(uint64(0))) + Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + }) + + It("clawback_multiple_accounts", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE, + }) + + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 2_000_000 * i.KYVE, // 1m + Commencement: types.TGE + YEAR, + }) + + s.CommitAfterSeconds(4 * YEAR) // vesting is done and nothing has claimed yet + tva, _ := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 1) + Expect(tva.Clawback).To(Equal(uint64(0))) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status := teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(2_000_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(2_000_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(Equal(uint64(0))) + Expect(status.LockedVestedAmount).To(Equal(uint64(0))) + Expect(status.CurrentClaimableAmount).To(Equal(2_000_000 * i.KYVE)) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 3_000_000*i.KYVE)) + s.PerformValidityChecks() + + // ACT + // clawback right in the middle + s.RunTxTeamSuccess(&types.MsgClawback{ + Authority: types.AUTHORITY_ADDRESS, + Id: 1, + Clawback: types.TGE + 2*YEAR + 6*MONTH, + }) + + tva, _ = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 1) + Expect(tva.Clawback).To(Equal(types.TGE + 2*YEAR + 6*MONTH)) + Expect(tva.UnlockedClaimed).To(Equal(uint64(0))) + Expect(tva.LastClaimedTime).To(Equal(uint64(0))) + + status = teamKeeper.GetVestingStatus(tva, uint64(s.Ctx().BlockTime().Unix())) + Expect(status.TotalVestedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.TotalUnlockedAmount).To(Equal(1_000_000 * i.KYVE)) + Expect(status.RemainingUnvestedAmount).To(BeZero()) + Expect(status.LockedVestedAmount).To(BeZero()) + Expect(status.CurrentClaimableAmount).To(Equal(1_000_000 * i.KYVE)) + + info = s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE - 1_000_000*i.KYVE)) + }) +}) diff --git a/x/team/keeper/msg_server_create_team_vesting_account.go b/x/team/keeper/msg_server_create_team_vesting_account.go new file mode 100644 index 00000000..e20b8539 --- /dev/null +++ 
b/x/team/keeper/msg_server_create_team_vesting_account.go @@ -0,0 +1,40 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + "github.com/KYVENetwork/chain/x/team/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkErrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +func (k msgServer) CreateTeamVestingAccount(goCtx context.Context, msg *types.MsgCreateTeamVestingAccount) (*types.MsgCreateTeamVestingAccountResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if types.AUTHORITY_ADDRESS != msg.Authority { + return nil, errors.Wrapf(sdkErrors.ErrLogic, types.ErrInvalidAuthority.Error(), types.AUTHORITY_ADDRESS, msg.Authority) + } + + if msg.TotalAllocation == 0 || msg.Commencement == 0 { + return nil, errors.Wrapf(sdkErrors.ErrLogic, "total allocation %v or commencement %v invalid", msg.TotalAllocation, msg.Commencement) + } + + // check if new team vesting account still has allocation left + if k.GetIssuedTeamAllocation(ctx)+msg.TotalAllocation > types.TEAM_ALLOCATION { + return nil, sdkErrors.Wrapf(sdkErrors.ErrLogic, types.ErrAvailableFundsTooLow.Error(), types.TEAM_ALLOCATION-k.GetIssuedTeamAllocation(ctx), msg.TotalAllocation) + } + + id := k.AppendTeamVestingAccount(ctx, types.TeamVestingAccount{ + TotalAllocation: msg.TotalAllocation, + Commencement: msg.Commencement, + }) + + _ = ctx.EventManager().EmitTypedEvent(&types.EventCreateTeamVestingAccount{ + Id: id, + TotalAllocation: msg.TotalAllocation, + Commencement: msg.Commencement, + }) + + return &types.MsgCreateTeamVestingAccountResponse{}, nil +} diff --git a/x/team/keeper/msg_server_create_team_vesting_account_test.go b/x/team/keeper/msg_server_create_team_vesting_account_test.go new file mode 100644 index 00000000..6f45f767 --- /dev/null +++ b/x/team/keeper/msg_server_create_team_vesting_account_test.go @@ -0,0 +1,191 @@ +package keeper_test + +import ( + i "github.com/KYVENetwork/chain/testutil/integration" + "github.com/KYVENetwork/chain/x/team/types" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +/* + +TEST CASES - msg_server_create_team_vesting_account.go + +* Create a first TVA with invalid authority +* Create a first TVA with zero allocation +* Create a first TVA with zero commencement +* Create a first TVA with commencement 3 years before TGE +* Create TVA with more Allocation than available +* Create multiple TVAs + +*/ + +var _ = Describe("msg_server_create_team_vesting_account.go", Ordered, func() { + // init new clean chain at TGE time + s := i.NewCleanChainAtTime(int64(types.TGE)) + + BeforeEach(func() { + // init new clean chain at TGE time + s = i.NewCleanChainAtTime(int64(types.TGE)) + }) + + AfterEach(func() { + s.PerformValidityChecks() + }) + + It("Create a first TVA with invalid authority", func() { + // ACT + s.RunTxTeamError(&types.MsgCreateTeamVestingAccount{ + Authority: i.ALICE, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - types.VESTING_DURATION, + }) + + // ASSERT + tvas := s.App().TeamKeeper.GetTeamVestingAccounts(s.Ctx()) + Expect(tvas).To(HaveLen(0)) + }) + + It("Create a first TVA with zero allocation", func() { + // ACT + s.RunTxTeamError(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 0, // 1m + Commencement: types.TGE - types.VESTING_DURATION, + }) + + // ASSERT + tvas := s.App().TeamKeeper.GetTeamVestingAccounts(s.Ctx()) + Expect(tvas).To(HaveLen(0)) + }) + + It("Create a first TVA with zero commencement", func() { + // ACT + s.RunTxTeamError(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: 0, + }) + + // ASSERT + tvas := s.App().TeamKeeper.GetTeamVestingAccounts(s.Ctx()) + Expect(tvas).To(HaveLen(0)) + }) + + It("Create a first TVA with commencement 3 years before TGE", func() { + // ACT + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - types.VESTING_DURATION, + }) + + // ASSERT + tva, found := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(found).To(BeTrue()) + Expect(tva.Commencement).To(Equal(types.TGE - types.VESTING_DURATION)) + Expect(tva.TotalAllocation).To(Equal(1_000_000 * i.KYVE)) + Expect(tva.Clawback).To(BeZero()) + Expect(tva.UnlockedClaimed).To(BeZero()) + Expect(tva.LastClaimedTime).To(BeZero()) + Expect(tva.TotalRewards).To(BeZero()) + Expect(tva.RewardsClaimed).To(BeZero()) + + _, found = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 1) + Expect(found).To(BeFalse()) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.Authority).To(Equal(types.AUTHORITY_ADDRESS)) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(Equal(1_000_000 * i.KYVE)) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 1_000_000*i.KYVE)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAuthorityRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAuthorityRewards).To(BeZero()) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards)) + + Expect(info.TotalAccountRewards).To(BeZero()) + Expect(info.ClaimedAccountRewards).To(BeZero()) + Expect(info.AvailableAccountRewards).To(BeZero()) + + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards)) + Expect(info.TeamModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards)) + }) + + 
It("Create TVA with more Allocation than available", func() { + // ACT + s.RunTxTeamError(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: types.TEAM_ALLOCATION + 1, // 1m + Commencement: types.TGE - types.VESTING_DURATION, + }) + + // ASSERT + _, found := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(found).To(BeFalse()) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(BeZero()) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + }) + + It("Create multiple TVAs", func() { + // ARRANGE + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 1_000_000 * i.KYVE, // 1m + Commencement: types.TGE - types.VESTING_DURATION, + }) + + // ACT + s.RunTxTeamSuccess(&types.MsgCreateTeamVestingAccount{ + Authority: types.AUTHORITY_ADDRESS, + TotalAllocation: 2_000_000 * i.KYVE, // 1m + Commencement: types.TGE + types.VESTING_DURATION, + }) + + // ASSERT + tvas := s.App().TeamKeeper.GetTeamVestingAccounts(s.Ctx()) + Expect(tvas).To(HaveLen(2)) + + tva, found := s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 0) + Expect(found).To(BeTrue()) + Expect(tva.Commencement).To(Equal(types.TGE - types.VESTING_DURATION)) + Expect(tva.TotalAllocation).To(Equal(1_000_000 * i.KYVE)) + Expect(tva.Clawback).To(BeZero()) + Expect(tva.UnlockedClaimed).To(BeZero()) + Expect(tva.LastClaimedTime).To(BeZero()) + Expect(tva.TotalRewards).To(BeZero()) + Expect(tva.RewardsClaimed).To(BeZero()) + + tva, found = s.App().TeamKeeper.GetTeamVestingAccount(s.Ctx(), 1) + Expect(found).To(BeTrue()) + Expect(tva.Commencement).To(Equal(types.TGE + types.VESTING_DURATION)) + Expect(tva.TotalAllocation).To(Equal(2_000_000 * i.KYVE)) + Expect(tva.Clawback).To(BeZero()) + Expect(tva.UnlockedClaimed).To(BeZero()) + Expect(tva.LastClaimedTime).To(BeZero()) + Expect(tva.TotalRewards).To(BeZero()) + Expect(tva.RewardsClaimed).To(BeZero()) + + info := s.App().TeamKeeper.GetTeamInfo(s.Ctx()) + Expect(info.Authority).To(Equal(types.AUTHORITY_ADDRESS)) + Expect(info.TotalTeamAllocation).To(Equal(types.TEAM_ALLOCATION)) + Expect(info.IssuedTeamAllocation).To(Equal(3_000_000 * i.KYVE)) + Expect(info.AvailableTeamAllocation).To(Equal(types.TEAM_ALLOCATION - 3_000_000*i.KYVE)) + + // NOTE: Disable because there is no inflation rewards here + // Expect(info.TotalAuthorityRewards).To(BeNumerically(">", uint64(0))) + Expect(info.ClaimedAuthorityRewards).To(BeZero()) + Expect(info.AvailableAuthorityRewards).To(Equal(info.TotalAuthorityRewards)) + + Expect(info.TotalAccountRewards).To(BeZero()) + Expect(info.ClaimedAccountRewards).To(BeZero()) + Expect(info.AvailableAccountRewards).To(BeZero()) + + Expect(info.RequiredModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards)) + Expect(info.TeamModuleBalance).To(Equal(types.TEAM_ALLOCATION + info.TotalAuthorityRewards)) + }) +}) diff --git a/x/team/module.go b/x/team/module.go new file mode 100644 index 00000000..4266c8db --- /dev/null +++ b/x/team/module.go @@ -0,0 +1,169 @@ +package team + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + abci 
"github.com/tendermint/tendermint/abci/types" + + // Bank + bankKeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + // Mint + mintKeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + // Team + "github.com/KYVENetwork/chain/x/team/client/cli" + "github.com/KYVENetwork/chain/x/team/keeper" + "github.com/KYVENetwork/chain/x/team/types" + // Upgrade + upgradeKeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// ---------------------------------------------------------------------------- +// AppModuleBasic +// ---------------------------------------------------------------------------- + +// AppModuleBasic implements the AppModuleBasic interface that defines the independent methods a Cosmos SDK module needs to implement. +type AppModuleBasic struct { + cdc codec.BinaryCodec +} + +func NewAppModuleBasic(cdc codec.BinaryCodec) AppModuleBasic { + return AppModuleBasic{cdc: cdc} +} + +// Name returns the name of the module as a string +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the amino codec for the module, which is used to marshal and unmarshal structs to/from []byte in order to persist them in the module's KVStore +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterCodec(cdc) +} + +// RegisterInterfaces registers a module's interface types and their concrete implementations as proto.Message +func (a AppModuleBasic) RegisterInterfaces(reg cdctypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns a default GenesisState for the module, marshalled to json.RawMessage. The default GenesisState need to be defined by the module developer and is primarily used for testing +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesis()) +} + +// ValidateGenesis used to validate the GenesisState, given in its json.RawMessage form +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var genState types.GenesisState + if err := cdc.UnmarshalJSON(bz, &genState); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + return genState.Validate() +} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + _ = types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)) +} + +// GetTxCmd returns the root Tx command for the module. The subcommands of this root command are used by end-users to generate new transactions containing messages defined in the module +func (a AppModuleBasic) GetTxCmd() *cobra.Command { + return cli.GetTxCmd() +} + +// GetQueryCmd returns the root query command for the module. 
The subcommands of this root command are used by end-users to generate new queries to the subset of the state defined by the module +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// ---------------------------------------------------------------------------- +// AppModule +// ---------------------------------------------------------------------------- + +// AppModule implements the AppModule interface that defines the inter-dependent methods that modules need to implement +type AppModule struct { + AppModuleBasic + + bk bankKeeper.Keeper + mk mintKeeper.Keeper + keeper keeper.Keeper + uk upgradeKeeper.Keeper +} + +func NewAppModule( + cdc codec.Codec, + bk bankKeeper.Keeper, + mk mintKeeper.Keeper, + keeper keeper.Keeper, + uk upgradeKeeper.Keeper, +) AppModule { + return AppModule{ + AppModuleBasic: NewAppModuleBasic(cdc), + bk: bk, + mk: mk, + keeper: keeper, + uk: uk, + } +} + +// Deprecated: use RegisterServices +func (am AppModule) Route() sdk.Route { return sdk.Route{} } + +// Deprecated: use RegisterServices +func (AppModule) QuerierRoute() string { return types.RouterKey } + +// Deprecated: use RegisterServices +func (am AppModule) LegacyQuerierHandler(_ *codec.LegacyAmino) sdk.Querier { + return nil +} + +// RegisterServices registers a gRPC query service to respond to the module-specific gRPC queries +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(am.keeper)) + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) +} + +// RegisterInvariants registers the invariants of the module. If an invariant deviates from its predicted value, the InvariantRegistry triggers appropriate logic (most often the chain will be halted) +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// InitGenesis performs the module's genesis initialization. It returns no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, gs json.RawMessage) []abci.ValidatorUpdate { + var genState types.GenesisState + // Initialize global index to index in genesis state + cdc.MustUnmarshalJSON(gs, &genState) + + InitGenesis(ctx, am.keeper, genState) + + return []abci.ValidatorUpdate{} +} + +// ExportGenesis returns the module's exported genesis state as raw JSON bytes. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + genState := ExportGenesis(ctx, am.keeper) + return cdc.MustMarshalJSON(genState) +} + +// ConsensusVersion is a sequence number for state-breaking change of the module. It should be incremented on each consensus-breaking change introduced by the module. 
To avoid wrong/empty versions, the initial version should be set to 1
+func (AppModule) ConsensusVersion() uint64 { return 1 }
+
+// BeginBlock contains the logic that is automatically triggered at the beginning of each block
+func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) {
+	DistributeTeamInflation(ctx, am.bk, am.mk, am.keeper, am.uk)
+}
+
+// EndBlock contains the logic that is automatically triggered at the end of each block
+func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate {
+	return []abci.ValidatorUpdate{}
+}
diff --git a/x/team/spec/01_concepts.md b/x/team/spec/01_concepts.md
new file mode 100644
index 00000000..d206983b
--- /dev/null
+++ b/x/team/spec/01_concepts.md
@@ -0,0 +1,55 @@
+
+
+# Concepts
+
+The team module is responsible for distributing the team allocation (**16.5%** of the total
+genesis supply) to all eligible team members. It uses a special mechanism we call
+"Two-Layer-Vesting", which is the reason why the team distribution received its own module.
+
+## Code Structure
+
+This module adheres to our global coding structure, defined [here](../../../CodeStructure.md).
+
+## Team Vesting Accounts
+
+Each team member gets a _TeamVestingAccount_ assigned. A TeamVestingAccount is not identified by an address; rather,
+it is identified by an incrementing ID. It is tracked off-chain which Account ID belongs to which team member.
+
+Furthermore, a TeamVestingAccount tracks the _commencement_ (the official start date of working at KYVE) and a
+_clawback_ (the official leave date after working at KYVE). The vesting amount depends on those two variables, which
+are custom for each team member.
+
+Finally, a TeamVestingAccount tracks the amount a team member has already claimed from his vesting account; this is
+used later to calculate the current unlocked amount.
+
+## Vesting
+
+The total vesting duration is a constant set to 3 years and won't change. $KYVE vests linearly over 3 years
+from the commencement date. During vesting there is a cliff, which is a constant set to 1 year and also won't
+change. So for the first year the vested amount is zero; on the day the cliff ends, the vested amount jumps to
+33.33% of the total allocation, since one third of the vesting duration has passed.
+
+Vested $KYVE can not be spent by the team member yet. Vesting is only the first of the two layers.
+Once $KYVE has vested, the team member is merely eligible for inflation rewards, which are explained in detail below.
+
+## Unlocking
+
+Once $KYVE has successfully vested for a TeamVestingAccount, it is still locked. In order for the team member to claim
+his $KYVE, it needs to unlock first. The unlock starts either exactly 1 year after commencement or exactly 1 year after
+TGE, whichever is later. The unlock duration is constant and is set to 2 years. During unlocking there is
+no cliff and $KYVE unlocks at a linear rate based on seconds passed.
+
+## Clawback
+
+If a team member leaves KYVE during his vesting period, the authority is allowed to claw back the
+**remaining, unvested** $KYVE from the vesting account. A clawback is a unix timestamp of when the team member
+left. The clawback can only be initiated by the team module authority.
+
+## Claim Unlocked $KYVE
+
+Once the $KYVE of a team member has unlocked, the team member is allowed to claim it. In order to do that,
+the team member has to notify the authority that he wants to claim and provide it with a receiver address.
+With that, the authority can then claim on behalf of the team member, and the claimed $KYVE is transferred
+to the receiver address.
diff --git a/x/team/spec/02_state.md b/x/team/spec/02_state.md
new file mode 100644
index 00000000..b16c1ab1
--- /dev/null
+++ b/x/team/spec/02_state.md
@@ -0,0 +1,42 @@
+
+
+# State
+
+The module is mainly responsible for handling the team vesting account states.
+
+## TeamVestingAccounts
+
+The state is defined in one main proto file.
+
+### TeamVestingAccount
+
+Each team member gets a _TeamVestingAccount_ assigned. A TeamVestingAccount is not identified by an address; rather,
+it is identified by an incrementing ID. It is tracked off-chain which Account ID belongs to which team member.
+
+The TeamVestingAccount stores the total amount of $KYVE the team member has and the commencement of the team member
+(a unix timestamp of when the team member officially joined KYVE). Furthermore, the clawback time (if the
+team member leaves KYVE) and the already claimed $KYVE are stored. If the clawback is zero, the member did not
+receive a clawback.
+
+- TeamVestingAccountKey: `0x02 | Id -> ProtocolBuffer(teamVestingAccount)`
+- TeamVestingAccountCountKey: `0x03 | Count -> ProtocolBuffer(teamVestingAccountCount)`
+
+```protobuf
+syntax = "proto3";
+
+message TeamVestingAccount {
+  // id is a unique identify for each vesting account, tied to a single team member.
+  uint64 id = 1;
+  // total_allocation is the number of tokens reserved for this team member.
+  uint64 total_allocation = 2;
+  // claimed is the amount of tokens already claimed by the account holder
+  uint64 claimed = 3;
+  // clawback is a unix timestamp of a clawback. If timestamp is zero
+  // it means that the account has not received a clawback
+  uint64 clawback = 4;
+  // commencement is the unix timestamp of the member's official start date.
+  uint64 commencement = 5;
+}
+```
\ No newline at end of file
diff --git a/x/team/spec/03_messages.md b/x/team/spec/03_messages.md
new file mode 100644
index 00000000..68e0d845
--- /dev/null
+++ b/x/team/spec/03_messages.md
@@ -0,0 +1,35 @@
+
+
+# Messages
+
+All txs of this module can only be called by the authority.
+
+## `MsgCreateTeamVestingAccount`
+
+Using this message, the authority can create a new _TeamVestingAccount_.
+For that, the authority has to provide the total allocation the team member
+receives and the commencement date of the team member. The ID for the new
+TeamVestingAccount will be automatically assigned on-chain.
+
+The tx fails if the team module no longer has enough funds to create a
+vesting account with the requested allocation, therefore ensuring the
+authority does not overspend $KYVE.
+
+## `MsgClawback`
+
+If a team member leaves during his vesting period and the authority wants
+to claw back the **remaining** unvested $KYVE, the authority can call this
+tx. It has to provide the account id and the unix timestamp of when the
+clawback should be applied. The authority can update the clawback time of
+an account multiple times and even remove it again if the time is `0`.
+
+## `MsgClaimUnlocked`
+
+If a team member wants to claim $KYVE from his unlocked amount, he has to notify
+the authority to do that for him. The team member has to provide a wallet
+address to which the authority should claim the $KYVE. In order to claim,
+the authority has to call this tx with the matching account ID and a recipient
+address, which can be the team member's wallet directly or a proxy address
+instead, e.g. to deal with taxes.
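The two layers described in the concepts spec above compose as follows: the allocation first vests linearly over 3 years from commencement with a 1-year cliff (a clawback timestamp cuts the schedule short), and whatever has vested then unlocks linearly over 2 years, starting 1 year after commencement or 1 year after TGE, whichever is later. The snippet below is a minimal, self-contained sketch of that calculation; the constants, helper names (`vestedAmount`, `unlockedAmount`, `mulDiv`) and the example timestamps are illustrative assumptions, and the module's own `GetVestingStatus` in `x/team/keeper` remains the authoritative implementation.

```go
package main

import (
	"fmt"
	"math/big"
)

// Illustrative constants matching the schedule described in the concepts spec.
// The module defines its own constants (e.g. types.VESTING_DURATION); treat
// those as the source of truth.
const (
	year            = uint64(60 * 60 * 24 * 365)
	vestingDuration = 3 * year // linear vesting over 3 years
	cliffDuration   = 1 * year // nothing is vested during the first year
	unlockDuration  = 2 * year // linear unlock over 2 years, no cliff
)

// mulDiv computes a * b / c without overflowing uint64.
func mulDiv(a, b, c uint64) uint64 {
	r := new(big.Int).Mul(new(big.Int).SetUint64(a), new(big.Int).SetUint64(b))
	return r.Div(r, new(big.Int).SetUint64(c)).Uint64()
}

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// vestedAmount: layer one. The allocation vests linearly over 3 years from
// commencement with a 1-year cliff; a clawback timestamp caps the vesting time,
// so everything that has not vested by then stays with the team module.
func vestedAmount(total, commencement, clawback, now uint64) uint64 {
	end := now
	if clawback != 0 {
		end = minU64(end, clawback)
	}
	if end < commencement+cliffDuration {
		return 0 // still inside the cliff
	}
	elapsed := minU64(end-commencement, vestingDuration)
	return mulDiv(total, elapsed, vestingDuration)
}

// unlockedAmount: layer two. The unlock starts 1 year after commencement or
// 1 year after TGE, whichever is later, and releases the vested amount
// linearly over 2 years.
func unlockedAmount(total, commencement, clawback, tge, now uint64) uint64 {
	start := commencement + year
	if tge+year > start {
		start = tge + year
	}
	if now < start {
		return 0
	}
	elapsed := minU64(now-start, unlockDuration)
	return mulDiv(vestedAmount(total, commencement, clawback, now), elapsed, unlockDuration)
}

func main() {
	// Example: a member who joined one year before TGE, queried one month
	// after the unlock has started (timestamps are placeholders).
	tge := uint64(1_650_000_000)
	commencement := tge - year
	now := tge + year + year/12

	total := uint64(1_000_000_000_000) // 1m $KYVE in the smallest denomination

	fmt.Println(vestedAmount(total, commencement, 0, now))        // ~69.4% of the allocation
	fmt.Println(unlockedAmount(total, commencement, 0, tge, now)) // ~2.9% of the allocation
}
```

With these inputs the sketch reproduces the proportions asserted in the clawback tests above: roughly 69.4% of the allocation vested and about 2.9% unlocked one month into the unlock period.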
diff --git a/x/team/spec/04_begin_block.md b/x/team/spec/04_begin_block.md
new file mode 100644
index 00000000..61c7253e
--- /dev/null
+++ b/x/team/spec/04_begin_block.md
@@ -0,0 +1,10 @@
+
+
+# BeginBlock
+
+The `x/team` module begin-block hook handles the distribution of inflation rewards for the team. It calculates
+the current share of vested $KYVE in the team module, takes this share from the current block rewards and
+transfers it to an inflation reward wallet controlled by the team authority. Those $KYVE are intended as a
+reward for early team members, since their vesting starts earlier.
diff --git a/x/team/spec/05_events.md b/x/team/spec/05_events.md
new file mode 100644
index 00000000..7000e05a
--- /dev/null
+++ b/x/team/spec/05_events.md
@@ -0,0 +1,74 @@
+
+
+# Events
+
+The team module contains the following events:
+
+## EventCreateTeamVestingAccount
+
+EventCreateTeamVestingAccount indicates that a new team vesting account has been
+created.
+
+```protobuf
+syntax = "proto3";
+
+message EventCreateTeamVestingAccount {
+  // id is a unique identify for each vesting account, tied to a single team member.
+  uint64 id = 1;
+  // total_allocation is the number of tokens reserved for this team member.
+  uint64 total_allocation = 2;
+  // commencement is the unix timestamp of the member's official start date.
+  uint64 commencement = 3;
+}
+```
+
+It gets thrown from the following actions:
+
+- MsgCreateTeamVestingAccount
+
+## EventClaimedUnlocked
+
+EventClaimedUnlocked indicates that the authority has claimed unlocked $KYVE for a team
+member.
+
+```protobuf
+syntax = "proto3";
+
+message EventClaimedUnlocked {
+  // id is a unique identify for each vesting account, tied to a single team member.
+  uint64 id = 1;
+  // amount is the number of tokens claimed from the unlocked amount.
+  uint64 amount = 2;
+  // recipient is the receiver address of the claim.
+  string recipient = 3;
+}
+```
+
+It gets thrown from the following actions:
+
+- MsgClaimUnlocked
+
+## EventClawback
+
+EventClawback indicates that the authority has clawed back the remaining unvested $KYVE of a team
+member's vesting account.
+
+```protobuf
+syntax = "proto3";
+
+message EventClawback {
+  // id is a unique identify for each vesting account, tied to a single team member.
+  uint64 id = 1;
+  // clawback is a unix timestamp of a clawback. If timestamp is zero
+  // it means that the account has not received a clawback
+  uint64 clawback = 2;
+  // amount which got clawed back.
+ uint64 amount = 3; +} +``` + +It gets thrown from the following actions: + +- MsgClawback diff --git a/x/team/types/codec.go b/x/team/types/codec.go new file mode 100644 index 00000000..1f8f403a --- /dev/null +++ b/x/team/types/codec.go @@ -0,0 +1,22 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func RegisterCodec(_ *codec.LegacyAmino) {} + +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgCreateTeamVestingAccount{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgClaimUnlocked{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgClawback{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgClaimAccountRewards{}) + registry.RegisterImplementations((*sdk.Msg)(nil), &MsgClaimAuthorityRewards{}) +} + +var ( + Amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) diff --git a/x/team/types/errors.go b/x/team/types/errors.go new file mode 100644 index 00000000..db7777ae --- /dev/null +++ b/x/team/types/errors.go @@ -0,0 +1,10 @@ +package types + +import sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + +var ( + ErrInvalidAuthority = sdkerrors.Register(ModuleName, 1100, "invalid authority; expected %v, got %v") + ErrClaimAmountTooHigh = sdkerrors.Register(ModuleName, 1101, "tried to claim %v tkyve, unlocked amount is only %v tkyve") + ErrAvailableFundsTooLow = sdkerrors.Register(ModuleName, 1102, "team has %v tkyve available, asking for %v tkyve") + ErrInvalidClawbackDate = sdkerrors.Register(ModuleName, 1103, "The clawback can not be set earlier than the last claimed amount") +) diff --git a/x/team/types/events.pb.go b/x/team/types/events.pb.go new file mode 100644 index 00000000..3ab54f96 --- /dev/null +++ b/x/team/types/events.pb.go @@ -0,0 +1,1314 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/team/v1beta1/events.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateTeamVestingAccount is an event emitted when a new team vesting account gets created. +// emitted_by: MsgCreateTeamVestingAccount +type EventCreateTeamVestingAccount struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // total_allocation is the number of tokens reserved for this team member. + TotalAllocation uint64 `protobuf:"varint,2,opt,name=total_allocation,json=totalAllocation,proto3" json:"total_allocation,omitempty"` + // commencement is the unix timestamp of the member's official start date. 
+ Commencement uint64 `protobuf:"varint,3,opt,name=commencement,proto3" json:"commencement,omitempty"` +} + +func (m *EventCreateTeamVestingAccount) Reset() { *m = EventCreateTeamVestingAccount{} } +func (m *EventCreateTeamVestingAccount) String() string { return proto.CompactTextString(m) } +func (*EventCreateTeamVestingAccount) ProtoMessage() {} +func (*EventCreateTeamVestingAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_198acea0777f469a, []int{0} +} +func (m *EventCreateTeamVestingAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventCreateTeamVestingAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventCreateTeamVestingAccount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventCreateTeamVestingAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventCreateTeamVestingAccount.Merge(m, src) +} +func (m *EventCreateTeamVestingAccount) XXX_Size() int { + return m.Size() +} +func (m *EventCreateTeamVestingAccount) XXX_DiscardUnknown() { + xxx_messageInfo_EventCreateTeamVestingAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_EventCreateTeamVestingAccount proto.InternalMessageInfo + +func (m *EventCreateTeamVestingAccount) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventCreateTeamVestingAccount) GetTotalAllocation() uint64 { + if m != nil { + return m.TotalAllocation + } + return 0 +} + +func (m *EventCreateTeamVestingAccount) GetCommencement() uint64 { + if m != nil { + return m.Commencement + } + return 0 +} + +// EventClawback is an event emitted when the authority claws back tokens from a team vesting account. +// emitted_by: MsgClawback +type EventClawback struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // clawback is a unix timestamp of a clawback. If timestamp is zero + // it means that the account has not received a clawback + Clawback uint64 `protobuf:"varint,2,opt,name=clawback,proto3" json:"clawback,omitempty"` + // amount which got clawed back. 
+ Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` +} + +func (m *EventClawback) Reset() { *m = EventClawback{} } +func (m *EventClawback) String() string { return proto.CompactTextString(m) } +func (*EventClawback) ProtoMessage() {} +func (*EventClawback) Descriptor() ([]byte, []int) { + return fileDescriptor_198acea0777f469a, []int{1} +} +func (m *EventClawback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventClawback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventClawback.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventClawback) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventClawback.Merge(m, src) +} +func (m *EventClawback) XXX_Size() int { + return m.Size() +} +func (m *EventClawback) XXX_DiscardUnknown() { + xxx_messageInfo_EventClawback.DiscardUnknown(m) +} + +var xxx_messageInfo_EventClawback proto.InternalMessageInfo + +func (m *EventClawback) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventClawback) GetClawback() uint64 { + if m != nil { + return m.Clawback + } + return 0 +} + +func (m *EventClawback) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +// EventClaimedUnlocked is an event emitted when the authority claims unlocked $KYVE for a recipient. +// emitted_by: MsgClaimUnlocked +type EventClaimedUnlocked struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // amount is the number of tokens claimed from the unlocked amount. + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the receiver address of the claim. 
+ Recipient string `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *EventClaimedUnlocked) Reset() { *m = EventClaimedUnlocked{} } +func (m *EventClaimedUnlocked) String() string { return proto.CompactTextString(m) } +func (*EventClaimedUnlocked) ProtoMessage() {} +func (*EventClaimedUnlocked) Descriptor() ([]byte, []int) { + return fileDescriptor_198acea0777f469a, []int{2} +} +func (m *EventClaimedUnlocked) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventClaimedUnlocked) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventClaimedUnlocked.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventClaimedUnlocked) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventClaimedUnlocked.Merge(m, src) +} +func (m *EventClaimedUnlocked) XXX_Size() int { + return m.Size() +} +func (m *EventClaimedUnlocked) XXX_DiscardUnknown() { + xxx_messageInfo_EventClaimedUnlocked.DiscardUnknown(m) +} + +var xxx_messageInfo_EventClaimedUnlocked proto.InternalMessageInfo + +func (m *EventClaimedUnlocked) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventClaimedUnlocked) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *EventClaimedUnlocked) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +// EventClaimInflationRewards is an event emitted when the authority claims inflation rewards for a recipient. +// emitted_by: MsgClaimInflationRewards +type EventClaimInflationRewards struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // amount is the amount of inflation rewards the authority should claim for the account holder + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the receiver address of the claim. 
+ Recipient string `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *EventClaimInflationRewards) Reset() { *m = EventClaimInflationRewards{} } +func (m *EventClaimInflationRewards) String() string { return proto.CompactTextString(m) } +func (*EventClaimInflationRewards) ProtoMessage() {} +func (*EventClaimInflationRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_198acea0777f469a, []int{3} +} +func (m *EventClaimInflationRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventClaimInflationRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventClaimInflationRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventClaimInflationRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventClaimInflationRewards.Merge(m, src) +} +func (m *EventClaimInflationRewards) XXX_Size() int { + return m.Size() +} +func (m *EventClaimInflationRewards) XXX_DiscardUnknown() { + xxx_messageInfo_EventClaimInflationRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_EventClaimInflationRewards proto.InternalMessageInfo + +func (m *EventClaimInflationRewards) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *EventClaimInflationRewards) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *EventClaimInflationRewards) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +// EventClaimAuthorityRewards is an event emitted when the authority claims its inflation rewards for a recipient. +// emitted_by: MsgClaimAuthorityRewards +type EventClaimAuthorityRewards struct { + // amount is the amount of inflation rewards the authority should claim for the account holder + Amount uint64 `protobuf:"varint,1,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the receiver address of the claim. 
+ Recipient string `protobuf:"bytes,2,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *EventClaimAuthorityRewards) Reset() { *m = EventClaimAuthorityRewards{} } +func (m *EventClaimAuthorityRewards) String() string { return proto.CompactTextString(m) } +func (*EventClaimAuthorityRewards) ProtoMessage() {} +func (*EventClaimAuthorityRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_198acea0777f469a, []int{4} +} +func (m *EventClaimAuthorityRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventClaimAuthorityRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventClaimAuthorityRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventClaimAuthorityRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventClaimAuthorityRewards.Merge(m, src) +} +func (m *EventClaimAuthorityRewards) XXX_Size() int { + return m.Size() +} +func (m *EventClaimAuthorityRewards) XXX_DiscardUnknown() { + xxx_messageInfo_EventClaimAuthorityRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_EventClaimAuthorityRewards proto.InternalMessageInfo + +func (m *EventClaimAuthorityRewards) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *EventClaimAuthorityRewards) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +func init() { + proto.RegisterType((*EventCreateTeamVestingAccount)(nil), "kyve.team.v1beta1.EventCreateTeamVestingAccount") + proto.RegisterType((*EventClawback)(nil), "kyve.team.v1beta1.EventClawback") + proto.RegisterType((*EventClaimedUnlocked)(nil), "kyve.team.v1beta1.EventClaimedUnlocked") + proto.RegisterType((*EventClaimInflationRewards)(nil), "kyve.team.v1beta1.EventClaimInflationRewards") + proto.RegisterType((*EventClaimAuthorityRewards)(nil), "kyve.team.v1beta1.EventClaimAuthorityRewards") +} + +func init() { proto.RegisterFile("kyve/team/v1beta1/events.proto", fileDescriptor_198acea0777f469a) } + +var fileDescriptor_198acea0777f469a = []byte{ + // 348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0x4f, 0x4b, 0xf3, 0x40, + 0x10, 0xc6, 0x9b, 0xbc, 0x2f, 0xc5, 0x2e, 0xfe, 0x0d, 0x22, 0xa5, 0xe8, 0x22, 0x39, 0xd9, 0x4b, + 0x42, 0xf1, 0x13, 0xd4, 0xd2, 0x83, 0x08, 0x1e, 0xa2, 0x16, 0x14, 0x41, 0x36, 0x9b, 0xb1, 0x5d, + 0x92, 0xdd, 0x2d, 0xc9, 0xb4, 0xb5, 0x17, 0x3f, 0x83, 0x1f, 0xcb, 0x63, 0x8f, 0x1e, 0xa5, 0xfd, + 0x22, 0x92, 0x34, 0x69, 0xaa, 0xc5, 0x9b, 0xc7, 0x79, 0x9e, 0x99, 0xdf, 0x33, 0x03, 0x43, 0x68, + 0x38, 0x1d, 0x83, 0x8b, 0xc0, 0xa4, 0x3b, 0x6e, 0xf9, 0x80, 0xac, 0xe5, 0xc2, 0x18, 0x14, 0x26, + 0xce, 0x30, 0xd6, 0xa8, 0xad, 0x83, 0xd4, 0x77, 0x52, 0xdf, 0xc9, 0x7d, 0xfb, 0x95, 0x9c, 0x74, + 0xd3, 0x96, 0x4e, 0x0c, 0x0c, 0xe1, 0x16, 0x98, 0xec, 0x41, 0x82, 0x42, 0xf5, 0xdb, 0x9c, 0xeb, + 0x91, 0x42, 0x6b, 0x97, 0x98, 0x22, 0xa8, 0x1b, 0xa7, 0xc6, 0xd9, 0x7f, 0xcf, 0x14, 0x81, 0xd5, + 0x24, 0xfb, 0xa8, 0x91, 0x45, 0x4f, 0x2c, 0x8a, 0x34, 0x67, 0x28, 0xb4, 0xaa, 0x9b, 0x99, 0xbb, + 0x97, 0xe9, 0xed, 0x95, 0x6c, 0xd9, 0x64, 0x9b, 0x6b, 0x29, 0x41, 0x71, 0x90, 0xa0, 0xb0, 0xfe, + 0x2f, 0x6b, 0xfb, 0xa6, 0xd9, 0x37, 0x64, 0x67, 0x99, 0x1f, 0xb1, 0x89, 0xcf, 0x78, 0xb8, 0x91, + 0xd7, 0x20, 0x5b, 0x3c, 0xf7, 0xf2, 0x9c, 0x55, 0x6d, 0x1d, 0x91, 0x2a, 0x93, 0xe9, 0x96, 0x39, + 0x3a, 0xaf, 0xec, 0x47, 0x72, 0x58, 0x40, 
0x85, 0x84, 0xe0, 0x4e, 0x45, 0x9a, 0x87, 0x10, 0x6c, + 0xb0, 0xcb, 0x79, 0x73, 0x7d, 0xde, 0x3a, 0x26, 0xb5, 0x18, 0xb8, 0x18, 0x8a, 0x62, 0xeb, 0x9a, + 0x57, 0x0a, 0xb6, 0x4f, 0x1a, 0x25, 0xfd, 0x52, 0x3d, 0x47, 0xd9, 0xb5, 0x1e, 0x4c, 0x58, 0x1c, + 0x24, 0x7f, 0x94, 0xe1, 0xad, 0x67, 0xb4, 0x47, 0x38, 0xd0, 0xb1, 0xc0, 0x69, 0x91, 0x51, 0x32, + 0x8d, 0xdf, 0x99, 0xe6, 0x0f, 0xe6, 0x45, 0xe7, 0x7d, 0x4e, 0x8d, 0xd9, 0x9c, 0x1a, 0x9f, 0x73, + 0x6a, 0xbc, 0x2d, 0x68, 0x65, 0xb6, 0xa0, 0x95, 0x8f, 0x05, 0xad, 0x3c, 0x34, 0xfb, 0x02, 0x07, + 0x23, 0xdf, 0xe1, 0x5a, 0xba, 0x57, 0xf7, 0xbd, 0xee, 0x35, 0xe0, 0x44, 0xc7, 0xa1, 0xcb, 0x07, + 0x4c, 0x28, 0xf7, 0x65, 0xf9, 0x51, 0x38, 0x1d, 0x42, 0xe2, 0x57, 0xb3, 0x4f, 0x3a, 0xff, 0x0a, + 0x00, 0x00, 0xff, 0xff, 0x5f, 0x05, 0x1f, 0x86, 0x6b, 0x02, 0x00, 0x00, +} + +func (m *EventCreateTeamVestingAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventCreateTeamVestingAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventCreateTeamVestingAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commencement != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Commencement)) + i-- + dAtA[i] = 0x18 + } + if m.TotalAllocation != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.TotalAllocation)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventClawback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventClawback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventClawback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if m.Clawback != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Clawback)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventClaimedUnlocked) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventClaimedUnlocked) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventClaimedUnlocked) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x1a + } + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventClaimInflationRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventClaimInflationRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventClaimInflationRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x1a + } + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *EventClaimAuthorityRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventClaimAuthorityRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventClaimAuthorityRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintEvents(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x12 + } + if m.Amount != 0 { + i = encodeVarintEvents(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { + offset -= sovEvents(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventCreateTeamVestingAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + if m.TotalAllocation != 0 { + n += 1 + sovEvents(uint64(m.TotalAllocation)) + } + if m.Commencement != 0 { + n += 1 + sovEvents(uint64(m.Commencement)) + } + return n +} + +func (m *EventClawback) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + if m.Clawback != 0 { + n += 1 + sovEvents(uint64(m.Clawback)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + return n +} + +func (m *EventClaimedUnlocked) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventClaimInflationRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovEvents(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func (m *EventClaimAuthorityRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Amount != 0 { + n += 1 + sovEvents(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovEvents(uint64(l)) + } + return n +} + +func sovEvents(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvents(x uint64) (n int) { + return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*EventCreateTeamVestingAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventCreateTeamVestingAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventCreateTeamVestingAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalAllocation", wireType) + } + m.TotalAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commencement", wireType) + } + m.Commencement = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commencement |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventClawback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventClawback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventClawback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Clawback", wireType) + } + m.Clawback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Clawback |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventClaimedUnlocked) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventClaimedUnlocked: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventClaimedUnlocked: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventClaimInflationRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventClaimInflationRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventClaimInflationRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventClaimAuthorityRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventClaimAuthorityRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventClaimAuthorityRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvents + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvents + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvents + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvents(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvents + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvents(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvents + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvents + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvents + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvents + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/team/types/genesis.go b/x/team/types/genesis.go new file mode 100644 index 00000000..22ac928f --- /dev/null +++ b/x/team/types/genesis.go @@ -0,0 +1,31 @@ +package types + +import "fmt" + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesis() *GenesisState { + return &GenesisState{} +} + +// Validate performs basic genesis state validation returning an error upon any failure. 
+func (gs GenesisState) Validate() error { + // Check for duplicated index in account entries + accountsIndexMap := make(map[string]struct{}) + + for _, elem := range gs.AccountList { + index := string(TeamVestingAccountKeyPrefix(elem.Id)) + if _, ok := accountsIndexMap[index]; ok { + return fmt.Errorf("duplicated account id %v", elem) + } + accountsIndexMap[index] = struct{}{} + if elem.Id >= gs.AccountCount { + return fmt.Errorf("account id higher than account count %v", elem) + } + } + + if gs.Authority.RewardsClaimed > gs.Authority.TotalRewards { + return fmt.Errorf("claimed is greater than total rewards %#v", gs.Authority) + } + + return nil +} diff --git a/x/team/types/genesis.pb.go b/x/team/types/genesis.pb.go new file mode 100644 index 00000000..0fcb0f15 --- /dev/null +++ b/x/team/types/genesis.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/team/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the team module's genesis state. +type GenesisState struct { + // authority ... + Authority Authority `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority"` + // account_list ... + AccountList []TeamVestingAccount `protobuf:"bytes,3,rep,name=account_list,json=accountList,proto3" json:"account_list"` + // account_count ... 
+ AccountCount uint64 `protobuf:"varint,4,opt,name=account_count,json=accountCount,proto3" json:"account_count,omitempty"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_6a6a0401797f9ed5, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetAuthority() Authority { + if m != nil { + return m.Authority + } + return Authority{} +} + +func (m *GenesisState) GetAccountList() []TeamVestingAccount { + if m != nil { + return m.AccountList + } + return nil +} + +func (m *GenesisState) GetAccountCount() uint64 { + if m != nil { + return m.AccountCount + } + return 0 +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "kyve.team.v1beta1.GenesisState") +} + +func init() { proto.RegisterFile("kyve/team/v1beta1/genesis.proto", fileDescriptor_6a6a0401797f9ed5) } + +var fileDescriptor_6a6a0401797f9ed5 = []byte{ + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0xcf, 0xae, 0x2c, 0x4b, + 0xd5, 0x2f, 0x49, 0x4d, 0xcc, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x4f, 0x4f, + 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x04, 0x29, 0xd0, + 0x03, 0x29, 0xd0, 0x83, 0x2a, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xea, 0x83, 0x58, + 0x10, 0x85, 0x52, 0x32, 0x98, 0x26, 0x81, 0x75, 0x81, 0x65, 0x95, 0x8e, 0x32, 0x72, 0xf1, 0xb8, + 0x43, 0x0c, 0x0e, 0x2e, 0x49, 0x2c, 0x49, 0x15, 0x72, 0xe0, 0xe2, 0x4c, 0x2c, 0x2d, 0xc9, 0xc8, + 0x2f, 0xca, 0x2c, 0xa9, 0x94, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd1, 0xc3, 0xb0, 0x4b, + 0xcf, 0x11, 0xa6, 0xc6, 0x89, 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0x84, 0x26, 0x21, 0x3f, 0x2e, + 0x9e, 0xc4, 0xe4, 0xe4, 0xfc, 0xd2, 0xbc, 0x92, 0xf8, 0x9c, 0xcc, 0xe2, 0x12, 0x09, 0x66, 0x05, + 0x66, 0x0d, 0x6e, 0x23, 0x55, 0x2c, 0x86, 0x84, 0xa4, 0x26, 0xe6, 0x86, 0xa5, 0x16, 0x97, 0x64, + 0xe6, 0xa5, 0x3b, 0x42, 0x74, 0x40, 0x4d, 0xe3, 0x86, 0x1a, 0xe0, 0x93, 0x59, 0x5c, 0x22, 0xa4, + 0xcc, 0xc5, 0x0b, 0x33, 0x0f, 0x4c, 0x4a, 0xb0, 0x28, 0x30, 0x6a, 0xb0, 0x04, 0xc1, 0x2c, 0x71, + 0x06, 0xeb, 0x73, 0x3e, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, + 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xcd, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xef, 0xc8, 0x30, 0x57, 0xbf, 0xd4, + 0x92, 0xf2, 0xfc, 0xa2, 0x6c, 0xfd, 0xe4, 0x8c, 0xc4, 0xcc, 0x3c, 0xfd, 0x0a, 0x48, 0xc8, 0x94, + 0x54, 0x16, 0xa4, 0x16, 0x27, 0xb1, 0x81, 0xc3, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xee, + 0x1b, 0x1e, 0x76, 0x7d, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AccountCount != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.AccountCount)) + i-- + dAtA[i] = 0x20 + } + if len(m.AccountList) > 0 { + for iNdEx := len(m.AccountList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AccountList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Authority.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Authority.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.AccountList) > 0 { + for _, e := range m.AccountList { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.AccountCount != 0 { + n += 1 + sovGenesis(uint64(m.AccountCount)) + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Authority.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccountList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccountList = append(m.AccountList, TeamVestingAccount{}) + if err := m.AccountList[len(m.AccountList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccountCount", wireType) + } + m.AccountCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AccountCount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/team/types/keys.go b/x/team/types/keys.go new file mode 100644 index 00000000..b1657857 --- /dev/null +++ b/x/team/types/keys.go @@ -0,0 +1,51 @@ +package types + +import ( + "github.com/KYVENetwork/chain/util" +) + +const ( + // ModuleName defines the module name + ModuleName = "team" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey defines the module's message routing key + RouterKey = ModuleName + + // MemStoreKey defines the in-memory store key + MemStoreKey = "mem_team" +) + +// Team module account address +// kyve1e29j95xmsw3zmvtrk4st8e89z5n72v7nf70ma4 + +// VESTING_DURATION 3 years +const VESTING_DURATION uint64 = 3 * 365 * 24 * 3600 // 3 * 365 * 24 * 3600 + +// UNLOCK_DURATION 2 years +const UNLOCK_DURATION uint64 = 2 * 365 * 24 * 3600 // 2 * 
365 * 24 * 3600 + +// CLIFF_DURATION 1 year +const CLIFF_DURATION uint64 = 1 * 365 * 24 * 3600 // 1 * 365 * 24 * 3600 + +// AUTHORITY_ADDRESS is initialised in types.go by the init function which uses linker flags +var AUTHORITY_ADDRESS = "" + +// TEAM_ALLOCATION is initialised in types.go by the init function which uses linker flags +var TEAM_ALLOCATION uint64 = 0 + +// TGE is initialised in types.go by the init function which uses linker flags +var TGE uint64 = 0 + +var ( + ParamsKey = []byte{0x00} + AuthorityKey = []byte{0x01} + TeamVestingAccountKey = []byte{0x02} + TeamVestingAccountCountKey = []byte{0x03} +) + +func TeamVestingAccountKeyPrefix(id uint64) []byte { + return util.GetByteKey(id) +} diff --git a/x/team/types/message_claim_account_rewards.go b/x/team/types/message_claim_account_rewards.go new file mode 100644 index 00000000..d084addc --- /dev/null +++ b/x/team/types/message_claim_account_rewards.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgClaimAccountRewards = "claim_account_rewards" + +var _ sdk.Msg = &MsgClaimAccountRewards{} + +func (msg *MsgClaimAccountRewards) Route() string { + return RouterKey +} + +func (msg *MsgClaimAccountRewards) Type() string { + return TypeMsgClaimAccountRewards +} + +func (msg *MsgClaimAccountRewards) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgClaimAccountRewards) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgClaimAccountRewards) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid authority address (%s)", err) + } + return nil +} diff --git a/x/team/types/message_claim_authority_rewards.go b/x/team/types/message_claim_authority_rewards.go new file mode 100644 index 00000000..d1e417c6 --- /dev/null +++ b/x/team/types/message_claim_authority_rewards.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgClaimAuthorityRewards = "claim_authority_rewards" + +var _ sdk.Msg = &MsgClaimAuthorityRewards{} + +func (msg *MsgClaimAuthorityRewards) Route() string { + return RouterKey +} + +func (msg *MsgClaimAuthorityRewards) Type() string { + return TypeMsgClaimAuthorityRewards +} + +func (msg *MsgClaimAuthorityRewards) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgClaimAuthorityRewards) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgClaimAuthorityRewards) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid authority address (%s)", err) + } + return nil +} diff --git a/x/team/types/message_claim_unlocked.go b/x/team/types/message_claim_unlocked.go new file mode 100644 index 00000000..1e4f73cf --- /dev/null +++ b/x/team/types/message_claim_unlocked.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgClaimUnlocked = 
"claim_unlocked" + +var _ sdk.Msg = &MsgClaimUnlocked{} + +func (msg *MsgClaimUnlocked) Route() string { + return RouterKey +} + +func (msg *MsgClaimUnlocked) Type() string { + return TypeMsgClaimUnlocked +} + +func (msg *MsgClaimUnlocked) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgClaimUnlocked) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgClaimUnlocked) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid authority address (%s)", err) + } + return nil +} diff --git a/x/team/types/message_clawback.go b/x/team/types/message_clawback.go new file mode 100644 index 00000000..8fb6bdd9 --- /dev/null +++ b/x/team/types/message_clawback.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgClawback = "clawback" + +var _ sdk.Msg = &MsgClawback{} + +func (msg *MsgClawback) Route() string { + return RouterKey +} + +func (msg *MsgClawback) Type() string { + return TypeMsgClawback +} + +func (msg *MsgClawback) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgClawback) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgClawback) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid authority address (%s)", err) + } + return nil +} diff --git a/x/team/types/message_create_team_vesting_account.go b/x/team/types/message_create_team_vesting_account.go new file mode 100644 index 00000000..ec454db2 --- /dev/null +++ b/x/team/types/message_create_team_vesting_account.go @@ -0,0 +1,39 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const TypeMsgCreateTeamVestingAccount = "create_team_vesting_account" + +var _ sdk.Msg = &MsgCreateTeamVestingAccount{} + +func (msg *MsgCreateTeamVestingAccount) Route() string { + return RouterKey +} + +func (msg *MsgCreateTeamVestingAccount) Type() string { + return TypeMsgCreateTeamVestingAccount +} + +func (msg *MsgCreateTeamVestingAccount) GetSigners() []sdk.AccAddress { + creator, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + panic(err) + } + return []sdk.AccAddress{creator} +} + +func (msg *MsgCreateTeamVestingAccount) GetSignBytes() []byte { + bz := ModuleCdc.MustMarshalJSON(msg) + return sdk.MustSortJSON(bz) +} + +func (msg *MsgCreateTeamVestingAccount) ValidateBasic() error { + _, err := sdk.AccAddressFromBech32(msg.Authority) + if err != nil { + return sdkerrors.Wrapf(sdkerrors.ErrInvalidAddress, "invalid authority address (%s)", err) + } + return nil +} diff --git a/x/team/types/query.pb.go b/x/team/types/query.pb.go new file mode 100644 index 00000000..045454c4 --- /dev/null +++ b/x/team/types/query.pb.go @@ -0,0 +1,3639 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: kyve/team/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryAccountsRequest is request type for the Query/TeamInfo RPC method. +type QueryTeamInfoRequest struct { +} + +func (m *QueryTeamInfoRequest) Reset() { *m = QueryTeamInfoRequest{} } +func (m *QueryTeamInfoRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTeamInfoRequest) ProtoMessage() {} +func (*QueryTeamInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{0} +} +func (m *QueryTeamInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamInfoRequest.Merge(m, src) +} +func (m *QueryTeamInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamInfoRequest proto.InternalMessageInfo + +// QueryAccountsResponse is response type for the Query/TeamInfo RPC method. +type QueryTeamInfoResponse struct { + // authority is the authorities address + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // total_team_allocation is the total allocation in $KYVE the team module has in order to reward team members + TotalTeamAllocation uint64 `protobuf:"varint,2,opt,name=total_team_allocation,json=totalTeamAllocation,proto3" json:"total_team_allocation,omitempty"` + // issued_team_allocation is the amount in $KYVE tied to team vesting accounts and which are not available anymore + IssuedTeamAllocation uint64 `protobuf:"varint,3,opt,name=issued_team_allocation,json=issuedTeamAllocation,proto3" json:"issued_team_allocation,omitempty"` + // available_team_allocation is the amount in $KYVE with which further team vesting accounts can be created. + // if the available amount is zero no new vesting accounts can be created + AvailableTeamAllocation uint64 `protobuf:"varint,4,opt,name=available_team_allocation,json=availableTeamAllocation,proto3" json:"available_team_allocation,omitempty"` + // total_authority_rewards is the amount in $KYVE the authority has earned in total with inflation rewards. 
+ // Those rewards can be payed out for different purposes + TotalAuthorityRewards uint64 `protobuf:"varint,5,opt,name=total_authority_rewards,json=totalAuthorityRewards,proto3" json:"total_authority_rewards,omitempty"` + // claimed_authority_rewards is the amount in $KYVE of how much the authority already claimed + ClaimedAuthorityRewards uint64 `protobuf:"varint,6,opt,name=claimed_authority_rewards,json=claimedAuthorityRewards,proto3" json:"claimed_authority_rewards,omitempty"` + // available_authority_rewards is the amount in $KYVE of how much rewards the authority can claim right now + AvailableAuthorityRewards uint64 `protobuf:"varint,7,opt,name=available_authority_rewards,json=availableAuthorityRewards,proto3" json:"available_authority_rewards,omitempty"` + // total_account_rewards is the amount in $KYVE all team vesting accounts have ever received + TotalAccountRewards uint64 `protobuf:"varint,8,opt,name=total_account_rewards,json=totalAccountRewards,proto3" json:"total_account_rewards,omitempty"` + // claimed_account_rewards is the amount in $KYVE all team vesting accounts have ever claimed + ClaimedAccountRewards uint64 `protobuf:"varint,9,opt,name=claimed_account_rewards,json=claimedAccountRewards,proto3" json:"claimed_account_rewards,omitempty"` + // available_account_rewards is the total amount of $KYVE all team vesting accounts can currently claim + AvailableAccountRewards uint64 `protobuf:"varint,10,opt,name=available_account_rewards,json=availableAccountRewards,proto3" json:"available_account_rewards,omitempty"` + // required_module_balance is the balance the team module should have. If this is less than the module balance + // something went wrong + RequiredModuleBalance uint64 `protobuf:"varint,11,opt,name=required_module_balance,json=requiredModuleBalance,proto3" json:"required_module_balance,omitempty"` + // team_module_balance is the team module balance in $KYVE + TeamModuleBalance uint64 `protobuf:"varint,12,opt,name=team_module_balance,json=teamModuleBalance,proto3" json:"team_module_balance,omitempty"` +} + +func (m *QueryTeamInfoResponse) Reset() { *m = QueryTeamInfoResponse{} } +func (m *QueryTeamInfoResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTeamInfoResponse) ProtoMessage() {} +func (*QueryTeamInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{1} +} +func (m *QueryTeamInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamInfoResponse.Merge(m, src) +} +func (m *QueryTeamInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamInfoResponse proto.InternalMessageInfo + +func (m *QueryTeamInfoResponse) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *QueryTeamInfoResponse) GetTotalTeamAllocation() uint64 { + if m != nil { + return m.TotalTeamAllocation + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetIssuedTeamAllocation() uint64 { + if m != nil { + return m.IssuedTeamAllocation 
+ } + return 0 +} + +func (m *QueryTeamInfoResponse) GetAvailableTeamAllocation() uint64 { + if m != nil { + return m.AvailableTeamAllocation + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetTotalAuthorityRewards() uint64 { + if m != nil { + return m.TotalAuthorityRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetClaimedAuthorityRewards() uint64 { + if m != nil { + return m.ClaimedAuthorityRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetAvailableAuthorityRewards() uint64 { + if m != nil { + return m.AvailableAuthorityRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetTotalAccountRewards() uint64 { + if m != nil { + return m.TotalAccountRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetClaimedAccountRewards() uint64 { + if m != nil { + return m.ClaimedAccountRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetAvailableAccountRewards() uint64 { + if m != nil { + return m.AvailableAccountRewards + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetRequiredModuleBalance() uint64 { + if m != nil { + return m.RequiredModuleBalance + } + return 0 +} + +func (m *QueryTeamInfoResponse) GetTeamModuleBalance() uint64 { + if m != nil { + return m.TeamModuleBalance + } + return 0 +} + +// QueryAccountsRequest is request type for the Query/TeamVestingAccounts RPC method. +type QueryTeamVestingAccountsRequest struct { +} + +func (m *QueryTeamVestingAccountsRequest) Reset() { *m = QueryTeamVestingAccountsRequest{} } +func (m *QueryTeamVestingAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingAccountsRequest) ProtoMessage() {} +func (*QueryTeamVestingAccountsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{2} +} +func (m *QueryTeamVestingAccountsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingAccountsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingAccountsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingAccountsRequest.Merge(m, src) +} +func (m *QueryTeamVestingAccountsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingAccountsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingAccountsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingAccountsRequest proto.InternalMessageInfo + +// QueryAccountsResponse is response type for the Query/TeamVestingAccounts RPC method. +type QueryTeamVestingAccountsResponse struct { + // accounts holds all the team vesting accounts of this module. 
+ Accounts []TeamVestingAccount `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts"` +} + +func (m *QueryTeamVestingAccountsResponse) Reset() { *m = QueryTeamVestingAccountsResponse{} } +func (m *QueryTeamVestingAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingAccountsResponse) ProtoMessage() {} +func (*QueryTeamVestingAccountsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{3} +} +func (m *QueryTeamVestingAccountsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingAccountsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingAccountsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingAccountsResponse.Merge(m, src) +} +func (m *QueryTeamVestingAccountsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingAccountsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingAccountsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingAccountsResponse proto.InternalMessageInfo + +func (m *QueryTeamVestingAccountsResponse) GetAccounts() []TeamVestingAccount { + if m != nil { + return m.Accounts + } + return nil +} + +// QueryTeamVestingAccountRequest is request type for the Query/TeamVestingAccount RPC method. +type QueryTeamVestingAccountRequest struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryTeamVestingAccountRequest) Reset() { *m = QueryTeamVestingAccountRequest{} } +func (m *QueryTeamVestingAccountRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingAccountRequest) ProtoMessage() {} +func (*QueryTeamVestingAccountRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{4} +} +func (m *QueryTeamVestingAccountRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingAccountRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingAccountRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingAccountRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingAccountRequest.Merge(m, src) +} +func (m *QueryTeamVestingAccountRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingAccountRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingAccountRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingAccountRequest proto.InternalMessageInfo + +func (m *QueryTeamVestingAccountRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryTeamVestingAccountResponse is the response type for the Query/TeamVestingAccount RPC method. 
+type QueryTeamVestingAccountResponse struct { + // account holds the requested team vesting account + Account TeamVestingAccount `protobuf:"bytes,1,opt,name=account,proto3" json:"account"` +} + +func (m *QueryTeamVestingAccountResponse) Reset() { *m = QueryTeamVestingAccountResponse{} } +func (m *QueryTeamVestingAccountResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingAccountResponse) ProtoMessage() {} +func (*QueryTeamVestingAccountResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{5} +} +func (m *QueryTeamVestingAccountResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingAccountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingAccountResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingAccountResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingAccountResponse.Merge(m, src) +} +func (m *QueryTeamVestingAccountResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingAccountResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingAccountResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingAccountResponse proto.InternalMessageInfo + +func (m *QueryTeamVestingAccountResponse) GetAccount() TeamVestingAccount { + if m != nil { + return m.Account + } + return TeamVestingAccount{} +} + +// QueryTeamCurrentVestingStatusRequest is request type for the Query/TeamCurrentVestingStatus RPC method. +type QueryTeamVestingStatusRequest struct { + // id is a unique identify for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *QueryTeamVestingStatusRequest) Reset() { *m = QueryTeamVestingStatusRequest{} } +func (m *QueryTeamVestingStatusRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingStatusRequest) ProtoMessage() {} +func (*QueryTeamVestingStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{6} +} +func (m *QueryTeamVestingStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingStatusRequest.Merge(m, src) +} +func (m *QueryTeamVestingStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingStatusRequest proto.InternalMessageInfo + +func (m *QueryTeamVestingStatusRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +// QueryTeamCurrentVestingStatusResponse is the response type for the Query/TeamCurrentVestingStatus RPC method. +type QueryTeamVestingStatusResponse struct { + // request_date .. 
+ RequestDate string `protobuf:"bytes,1,opt,name=request_date,json=requestDate,proto3" json:"request_date,omitempty"` + // plan ... + Plan *QueryVestingPlan `protobuf:"bytes,2,opt,name=plan,proto3" json:"plan,omitempty"` + // status .. + Status *QueryVestingStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` +} + +func (m *QueryTeamVestingStatusResponse) Reset() { *m = QueryTeamVestingStatusResponse{} } +func (m *QueryTeamVestingStatusResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingStatusResponse) ProtoMessage() {} +func (*QueryTeamVestingStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{7} +} +func (m *QueryTeamVestingStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingStatusResponse.Merge(m, src) +} +func (m *QueryTeamVestingStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingStatusResponse proto.InternalMessageInfo + +func (m *QueryTeamVestingStatusResponse) GetRequestDate() string { + if m != nil { + return m.RequestDate + } + return "" +} + +func (m *QueryTeamVestingStatusResponse) GetPlan() *QueryVestingPlan { + if m != nil { + return m.Plan + } + return nil +} + +func (m *QueryTeamVestingStatusResponse) GetStatus() *QueryVestingStatus { + if m != nil { + return m.Status + } + return nil +} + +// QueryTeamVestingStatusByTimeRequest is request type for the Query/TeamCurrentVestingByTimeStatus RPC method. +type QueryTeamVestingStatusByTimeRequest struct { + // id is a unique identify for each vesting account, tied to a single team member. 
+ Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // time is a unix timestamp of the time the vesting progress should be calculated + Time uint64 `protobuf:"varint,2,opt,name=time,proto3" json:"time,omitempty"` +} + +func (m *QueryTeamVestingStatusByTimeRequest) Reset() { *m = QueryTeamVestingStatusByTimeRequest{} } +func (m *QueryTeamVestingStatusByTimeRequest) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingStatusByTimeRequest) ProtoMessage() {} +func (*QueryTeamVestingStatusByTimeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{8} +} +func (m *QueryTeamVestingStatusByTimeRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingStatusByTimeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingStatusByTimeRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingStatusByTimeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingStatusByTimeRequest.Merge(m, src) +} +func (m *QueryTeamVestingStatusByTimeRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingStatusByTimeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingStatusByTimeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingStatusByTimeRequest proto.InternalMessageInfo + +func (m *QueryTeamVestingStatusByTimeRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *QueryTeamVestingStatusByTimeRequest) GetTime() uint64 { + if m != nil { + return m.Time + } + return 0 +} + +// QueryTeamVestingStatusByTimeResponse is the response type for the Query/TeamCurrentVestingByTimeStatus RPC method. +type QueryTeamVestingStatusByTimeResponse struct { + // request_date .. + RequestDate string `protobuf:"bytes,1,opt,name=request_date,json=requestDate,proto3" json:"request_date,omitempty"` + // plan ... + Plan *QueryVestingPlan `protobuf:"bytes,2,opt,name=plan,proto3" json:"plan,omitempty"` + // status .. 
+ Status *QueryVestingStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` +} + +func (m *QueryTeamVestingStatusByTimeResponse) Reset() { *m = QueryTeamVestingStatusByTimeResponse{} } +func (m *QueryTeamVestingStatusByTimeResponse) String() string { return proto.CompactTextString(m) } +func (*QueryTeamVestingStatusByTimeResponse) ProtoMessage() {} +func (*QueryTeamVestingStatusByTimeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{9} +} +func (m *QueryTeamVestingStatusByTimeResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryTeamVestingStatusByTimeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryTeamVestingStatusByTimeResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryTeamVestingStatusByTimeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryTeamVestingStatusByTimeResponse.Merge(m, src) +} +func (m *QueryTeamVestingStatusByTimeResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryTeamVestingStatusByTimeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryTeamVestingStatusByTimeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryTeamVestingStatusByTimeResponse proto.InternalMessageInfo + +func (m *QueryTeamVestingStatusByTimeResponse) GetRequestDate() string { + if m != nil { + return m.RequestDate + } + return "" +} + +func (m *QueryTeamVestingStatusByTimeResponse) GetPlan() *QueryVestingPlan { + if m != nil { + return m.Plan + } + return nil +} + +func (m *QueryTeamVestingStatusByTimeResponse) GetStatus() *QueryVestingStatus { + if m != nil { + return m.Status + } + return nil +} + +// QueryVestingStatus is a type holding information about the account's vesting progress +type QueryVestingStatus struct { + // total_vested_amount ... + TotalVestedAmount uint64 `protobuf:"varint,1,opt,name=total_vested_amount,json=totalVestedAmount,proto3" json:"total_vested_amount,omitempty"` + // total_unlocked_amount ... + TotalUnlockedAmount uint64 `protobuf:"varint,2,opt,name=total_unlocked_amount,json=totalUnlockedAmount,proto3" json:"total_unlocked_amount,omitempty"` + // current_claimable_amount ... + CurrentClaimableAmount uint64 `protobuf:"varint,3,opt,name=current_claimable_amount,json=currentClaimableAmount,proto3" json:"current_claimable_amount,omitempty"` + // locked_vested_amount ... + LockedVestedAmount uint64 `protobuf:"varint,4,opt,name=locked_vested_amount,json=lockedVestedAmount,proto3" json:"locked_vested_amount,omitempty"` + // remaining_unvested_amount ... + RemainingUnvestedAmount uint64 `protobuf:"varint,5,opt,name=remaining_unvested_amount,json=remainingUnvestedAmount,proto3" json:"remaining_unvested_amount,omitempty"` + // claimed_amount ... + ClaimedAmount uint64 `protobuf:"varint,6,opt,name=claimed_amount,json=claimedAmount,proto3" json:"claimed_amount,omitempty"` + // total_rewards ... + TotalRewards uint64 `protobuf:"varint,7,opt,name=total_rewards,json=totalRewards,proto3" json:"total_rewards,omitempty"` + // claimed_rewards ... + ClaimedRewards uint64 `protobuf:"varint,8,opt,name=claimed_rewards,json=claimedRewards,proto3" json:"claimed_rewards,omitempty"` + // available_rewards ... 
+ AvailableRewards uint64 `protobuf:"varint,9,opt,name=available_rewards,json=availableRewards,proto3" json:"available_rewards,omitempty"` +} + +func (m *QueryVestingStatus) Reset() { *m = QueryVestingStatus{} } +func (m *QueryVestingStatus) String() string { return proto.CompactTextString(m) } +func (*QueryVestingStatus) ProtoMessage() {} +func (*QueryVestingStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{10} +} +func (m *QueryVestingStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryVestingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryVestingStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryVestingStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryVestingStatus.Merge(m, src) +} +func (m *QueryVestingStatus) XXX_Size() int { + return m.Size() +} +func (m *QueryVestingStatus) XXX_DiscardUnknown() { + xxx_messageInfo_QueryVestingStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryVestingStatus proto.InternalMessageInfo + +func (m *QueryVestingStatus) GetTotalVestedAmount() uint64 { + if m != nil { + return m.TotalVestedAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetTotalUnlockedAmount() uint64 { + if m != nil { + return m.TotalUnlockedAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetCurrentClaimableAmount() uint64 { + if m != nil { + return m.CurrentClaimableAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetLockedVestedAmount() uint64 { + if m != nil { + return m.LockedVestedAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetRemainingUnvestedAmount() uint64 { + if m != nil { + return m.RemainingUnvestedAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetClaimedAmount() uint64 { + if m != nil { + return m.ClaimedAmount + } + return 0 +} + +func (m *QueryVestingStatus) GetTotalRewards() uint64 { + if m != nil { + return m.TotalRewards + } + return 0 +} + +func (m *QueryVestingStatus) GetClaimedRewards() uint64 { + if m != nil { + return m.ClaimedRewards + } + return 0 +} + +func (m *QueryVestingStatus) GetAvailableRewards() uint64 { + if m != nil { + return m.AvailableRewards + } + return 0 +} + +// QueryVestingPlan is a type holding information about the account's vesting data which does not change +type QueryVestingPlan struct { + // commencement ... + Commencement string `protobuf:"bytes,1,opt,name=commencement,proto3" json:"commencement,omitempty"` + // token_vesting_start ... + TokenVestingStart string `protobuf:"bytes,2,opt,name=token_vesting_start,json=tokenVestingStart,proto3" json:"token_vesting_start,omitempty"` + // token_vesting_finished ... + TokenVestingFinished string `protobuf:"bytes,3,opt,name=token_vesting_finished,json=tokenVestingFinished,proto3" json:"token_vesting_finished,omitempty"` + // token_unlock_start ... + TokenUnlockStart string `protobuf:"bytes,4,opt,name=token_unlock_start,json=tokenUnlockStart,proto3" json:"token_unlock_start,omitempty"` + // token_unlock_finished ... + TokenUnlockFinished string `protobuf:"bytes,5,opt,name=token_unlock_finished,json=tokenUnlockFinished,proto3" json:"token_unlock_finished,omitempty"` + // clawback ... + Clawback uint64 `protobuf:"varint,6,opt,name=clawback,proto3" json:"clawback,omitempty"` + // clawback_amount ... 
+ ClawbackAmount uint64 `protobuf:"varint,7,opt,name=clawback_amount,json=clawbackAmount,proto3" json:"clawback_amount,omitempty"` + // maximum_vesting_amount ... + MaximumVestingAmount uint64 `protobuf:"varint,8,opt,name=maximum_vesting_amount,json=maximumVestingAmount,proto3" json:"maximum_vesting_amount,omitempty"` +} + +func (m *QueryVestingPlan) Reset() { *m = QueryVestingPlan{} } +func (m *QueryVestingPlan) String() string { return proto.CompactTextString(m) } +func (*QueryVestingPlan) ProtoMessage() {} +func (*QueryVestingPlan) Descriptor() ([]byte, []int) { + return fileDescriptor_6dd564523865e528, []int{11} +} +func (m *QueryVestingPlan) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryVestingPlan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryVestingPlan.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryVestingPlan) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryVestingPlan.Merge(m, src) +} +func (m *QueryVestingPlan) XXX_Size() int { + return m.Size() +} +func (m *QueryVestingPlan) XXX_DiscardUnknown() { + xxx_messageInfo_QueryVestingPlan.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryVestingPlan proto.InternalMessageInfo + +func (m *QueryVestingPlan) GetCommencement() string { + if m != nil { + return m.Commencement + } + return "" +} + +func (m *QueryVestingPlan) GetTokenVestingStart() string { + if m != nil { + return m.TokenVestingStart + } + return "" +} + +func (m *QueryVestingPlan) GetTokenVestingFinished() string { + if m != nil { + return m.TokenVestingFinished + } + return "" +} + +func (m *QueryVestingPlan) GetTokenUnlockStart() string { + if m != nil { + return m.TokenUnlockStart + } + return "" +} + +func (m *QueryVestingPlan) GetTokenUnlockFinished() string { + if m != nil { + return m.TokenUnlockFinished + } + return "" +} + +func (m *QueryVestingPlan) GetClawback() uint64 { + if m != nil { + return m.Clawback + } + return 0 +} + +func (m *QueryVestingPlan) GetClawbackAmount() uint64 { + if m != nil { + return m.ClawbackAmount + } + return 0 +} + +func (m *QueryVestingPlan) GetMaximumVestingAmount() uint64 { + if m != nil { + return m.MaximumVestingAmount + } + return 0 +} + +func init() { + proto.RegisterType((*QueryTeamInfoRequest)(nil), "kyve.team.v1beta1.QueryTeamInfoRequest") + proto.RegisterType((*QueryTeamInfoResponse)(nil), "kyve.team.v1beta1.QueryTeamInfoResponse") + proto.RegisterType((*QueryTeamVestingAccountsRequest)(nil), "kyve.team.v1beta1.QueryTeamVestingAccountsRequest") + proto.RegisterType((*QueryTeamVestingAccountsResponse)(nil), "kyve.team.v1beta1.QueryTeamVestingAccountsResponse") + proto.RegisterType((*QueryTeamVestingAccountRequest)(nil), "kyve.team.v1beta1.QueryTeamVestingAccountRequest") + proto.RegisterType((*QueryTeamVestingAccountResponse)(nil), "kyve.team.v1beta1.QueryTeamVestingAccountResponse") + proto.RegisterType((*QueryTeamVestingStatusRequest)(nil), "kyve.team.v1beta1.QueryTeamVestingStatusRequest") + proto.RegisterType((*QueryTeamVestingStatusResponse)(nil), "kyve.team.v1beta1.QueryTeamVestingStatusResponse") + proto.RegisterType((*QueryTeamVestingStatusByTimeRequest)(nil), "kyve.team.v1beta1.QueryTeamVestingStatusByTimeRequest") + proto.RegisterType((*QueryTeamVestingStatusByTimeResponse)(nil), "kyve.team.v1beta1.QueryTeamVestingStatusByTimeResponse") + proto.RegisterType((*QueryVestingStatus)(nil), 
"kyve.team.v1beta1.QueryVestingStatus") + proto.RegisterType((*QueryVestingPlan)(nil), "kyve.team.v1beta1.QueryVestingPlan") +} + +func init() { proto.RegisterFile("kyve/team/v1beta1/query.proto", fileDescriptor_6dd564523865e528) } + +var fileDescriptor_6dd564523865e528 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4f, 0x6f, 0x1b, 0xc5, + 0x1b, 0xce, 0x26, 0xce, 0x1f, 0xbf, 0x49, 0xfb, 0x4b, 0x26, 0x6e, 0xe2, 0xfa, 0x97, 0xba, 0xe9, + 0xa6, 0x55, 0x03, 0x2d, 0xde, 0xc4, 0x89, 0x52, 0x14, 0x01, 0x52, 0x52, 0x0a, 0xaa, 0x10, 0x08, + 0x4c, 0x1b, 0x09, 0x2e, 0xab, 0xf1, 0x7a, 0xea, 0x8c, 0xbc, 0x7f, 0x92, 0xdd, 0xd9, 0xa4, 0x56, + 0xd5, 0x0b, 0x7c, 0x01, 0x24, 0x3e, 0x00, 0x9f, 0x00, 0x09, 0xce, 0x9c, 0xe0, 0xd4, 0x13, 0xaa, + 0xc4, 0x85, 0x13, 0x42, 0x09, 0x5f, 0x81, 0x03, 0x37, 0xb4, 0xef, 0xcc, 0xae, 0xbd, 0xbb, 0x76, + 0x1b, 0xdf, 0xb8, 0x6d, 0xf6, 0x79, 0x9f, 0x79, 0x9e, 0xf7, 0x9d, 0xd9, 0x67, 0x1c, 0xb8, 0xd6, + 0xe9, 0x9e, 0x30, 0x43, 0x30, 0xea, 0x18, 0x27, 0x9b, 0x4d, 0x26, 0xe8, 0xa6, 0x71, 0x1c, 0x32, + 0xbf, 0x5b, 0x3b, 0xf2, 0x3d, 0xe1, 0x91, 0x85, 0x08, 0xae, 0x45, 0x70, 0x4d, 0xc1, 0x95, 0x52, + 0xdb, 0x6b, 0x7b, 0x88, 0x1a, 0xd1, 0x93, 0x2c, 0xac, 0xac, 0xb4, 0x3d, 0xaf, 0x6d, 0x33, 0x83, + 0x1e, 0x71, 0x83, 0xba, 0xae, 0x27, 0xa8, 0xe0, 0x9e, 0x1b, 0xc4, 0x68, 0x5e, 0x05, 0xd7, 0x44, + 0x54, 0x5f, 0x82, 0xd2, 0x67, 0x91, 0xe6, 0x23, 0x46, 0x9d, 0x87, 0xee, 0x13, 0xaf, 0xc1, 0x8e, + 0x43, 0x16, 0x08, 0xfd, 0xbb, 0x49, 0xb8, 0x92, 0x01, 0x82, 0x23, 0xcf, 0x0d, 0x18, 0x59, 0x81, + 0x22, 0x0d, 0xc5, 0xa1, 0xe7, 0x73, 0xd1, 0x2d, 0x6b, 0xab, 0xda, 0x7a, 0xb1, 0xd1, 0x7b, 0x41, + 0xea, 0x70, 0x45, 0x78, 0x82, 0xda, 0x66, 0xa4, 0x61, 0x52, 0xdb, 0xf6, 0x2c, 0x74, 0x53, 0x1e, + 0x5f, 0xd5, 0xd6, 0x0b, 0x8d, 0x45, 0x04, 0xa3, 0x35, 0xf7, 0x12, 0x88, 0x6c, 0xc3, 0x12, 0x0f, + 0x82, 0x90, 0xb5, 0x72, 0xa4, 0x09, 0x24, 0x95, 0x24, 0x9a, 0x61, 0xed, 0xc2, 0x55, 0x7a, 0x42, + 0xb9, 0x4d, 0x9b, 0x36, 0xcb, 0x11, 0x0b, 0x48, 0x5c, 0x4e, 0x0a, 0x32, 0xdc, 0x1d, 0x58, 0x96, + 0x2e, 0x13, 0xe3, 0xa6, 0xcf, 0x4e, 0xa9, 0xdf, 0x0a, 0xca, 0x93, 0xc8, 0x94, 0x4d, 0xec, 0xc5, + 0x68, 0x43, 0x82, 0x91, 0xa6, 0x65, 0x53, 0xee, 0xb0, 0xd6, 0x00, 0xe6, 0x94, 0xd4, 0x54, 0x05, + 0x39, 0xee, 0x7b, 0xf0, 0xff, 0x9e, 0xdf, 0x3c, 0x7b, 0x1a, 0xd9, 0xbd, 0x96, 0x72, 0xfc, 0x64, + 0xb2, 0xd4, 0xb2, 0xbc, 0xd0, 0x15, 0x09, 0x73, 0xa6, 0x6f, 0xb2, 0x7b, 0x12, 0x8b, 0x39, 0x3b, + 0xb0, 0x9c, 0xf8, 0xcd, 0xb0, 0x8a, 0xb2, 0xcf, 0xd8, 0x6d, 0x9a, 0x97, 0x9a, 0x6d, 0x96, 0x09, + 0x99, 0xd9, 0xe6, 0x35, 0x7d, 0x76, 0x1c, 0x72, 0x9f, 0xb5, 0x4c, 0xc7, 0x6b, 0x85, 0x36, 0x33, + 0x9b, 0xd4, 0xa6, 0xae, 0xc5, 0xca, 0xb3, 0x52, 0x33, 0x86, 0x3f, 0x46, 0x74, 0x5f, 0x82, 0xa4, + 0x06, 0x8b, 0xb8, 0x8b, 0x19, 0xce, 0x1c, 0x72, 0x16, 0x22, 0x28, 0x55, 0xaf, 0xdf, 0x80, 0xeb, + 0xc9, 0x01, 0x3d, 0x60, 0x81, 0xe0, 0x6e, 0x5b, 0x39, 0x09, 0xe2, 0x43, 0xdc, 0x81, 0xd5, 0xe1, + 0x25, 0xea, 0x38, 0x7f, 0x08, 0x33, 0xaa, 0xc1, 0xa0, 0xac, 0xad, 0x4e, 0xac, 0xcf, 0xd6, 0x6f, + 0xd5, 0x72, 0x1f, 0x5e, 0x2d, 0xbf, 0xc2, 0x7e, 0xe1, 0xc5, 0x1f, 0xd7, 0xc7, 0x1a, 0x09, 0x59, + 0xdf, 0x80, 0xea, 0x10, 0x31, 0x65, 0x87, 0x5c, 0x86, 0x71, 0xde, 0xc2, 0x4f, 0xa6, 0xd0, 0x18, + 0xe7, 0x2d, 0xfd, 0x70, 0x68, 0x07, 0x89, 0xbb, 0x07, 0x30, 0xad, 0x04, 0x90, 0x37, 0xa2, 0xb9, + 0x98, 0xab, 0x1b, 0x70, 0x2d, 0xab, 0xf4, 0xb9, 0xa0, 0x22, 0x0c, 0x86, 0x59, 0xfb, 0x49, 0xcb, + 0x77, 0x13, 0x33, 0x94, 0xb5, 0x1b, 0x30, 0xe7, 0x4b, 0xb6, 0xd9, 0xa2, 0x82, 0xa9, 
0x28, 0x98, + 0x55, 0xef, 0xde, 0xa7, 0x82, 0x91, 0x7b, 0x50, 0x38, 0xb2, 0xa9, 0xfc, 0xf6, 0x67, 0xeb, 0x6b, + 0x03, 0xac, 0xa3, 0x86, 0x5a, 0xff, 0x53, 0x9b, 0xba, 0x0d, 0x24, 0x90, 0x77, 0x61, 0x2a, 0x40, + 0x35, 0x4c, 0x80, 0xc1, 0x5d, 0xf7, 0x53, 0x95, 0x35, 0x45, 0xd2, 0x1f, 0xc2, 0xda, 0x60, 0xf3, + 0xfb, 0xdd, 0x47, 0xdc, 0x61, 0x43, 0x9a, 0x26, 0x04, 0x0a, 0x82, 0x3b, 0x4c, 0x45, 0x15, 0x3e, + 0xeb, 0x3f, 0x6b, 0x70, 0xf3, 0xd5, 0x6b, 0xfd, 0xf7, 0xc7, 0xf1, 0xcb, 0x04, 0x90, 0x3c, 0x8c, + 0x1f, 0x1c, 0x06, 0xca, 0x09, 0x0b, 0x44, 0x94, 0x10, 0x4e, 0x72, 0xce, 0xa2, 0x0f, 0x2e, 0x82, + 0x0e, 0x10, 0xd9, 0x43, 0xa0, 0x17, 0x40, 0xa1, 0x6b, 0x7b, 0x56, 0xa7, 0xc7, 0xe8, 0x8f, 0xf6, + 0xc7, 0x0a, 0x53, 0x9c, 0xb7, 0xa1, 0x6c, 0x85, 0xbe, 0xcf, 0x5c, 0x61, 0x62, 0xd2, 0xc8, 0x40, + 0x91, 0x34, 0x19, 0xee, 0x4b, 0x0a, 0xbf, 0x1f, 0xc3, 0x8a, 0xb9, 0x01, 0x25, 0xa5, 0x92, 0xb6, + 0x27, 0x93, 0x9d, 0x48, 0x2c, 0xe5, 0x6f, 0x17, 0xae, 0xfa, 0xcc, 0xa1, 0xdc, 0xe5, 0x6e, 0xdb, + 0x0c, 0xdd, 0x34, 0x4d, 0xc6, 0xfa, 0x72, 0x52, 0xf0, 0x58, 0xe1, 0x8a, 0x7b, 0x0b, 0x2e, 0x27, + 0x41, 0x29, 0x09, 0x32, 0xcd, 0x2f, 0xc5, 0xf9, 0x28, 0xcb, 0xd6, 0xe0, 0x92, 0x1c, 0x41, 0x3a, + 0xb5, 0xe7, 0xf0, 0x65, 0x1c, 0x80, 0xb7, 0xe1, 0x7f, 0xf1, 0x5a, 0xe9, 0x88, 0x8e, 0x25, 0xe2, + 0xc2, 0x3b, 0xb0, 0xd0, 0x4b, 0xd9, 0x74, 0x2e, 0xcf, 0x27, 0x80, 0x2a, 0xd6, 0xff, 0x19, 0x87, + 0xf9, 0xec, 0xf1, 0x20, 0x3a, 0xcc, 0x59, 0x9e, 0xe3, 0x30, 0xd7, 0x62, 0x0e, 0x53, 0x7b, 0x57, + 0x6c, 0xa4, 0xde, 0xc9, 0x6d, 0xee, 0x30, 0x17, 0xe7, 0x18, 0x8d, 0x26, 0x10, 0xd4, 0x97, 0x9b, + 0x56, 0x8c, 0xb6, 0xb9, 0xc3, 0xdc, 0xde, 0xb9, 0xf0, 0x45, 0x74, 0x1b, 0xa7, 0xeb, 0x9f, 0x70, + 0x97, 0x07, 0x87, 0xac, 0x85, 0x1b, 0x56, 0x6c, 0x94, 0xfa, 0x29, 0x1f, 0x28, 0x8c, 0xdc, 0x05, + 0x22, 0x59, 0xf2, 0x70, 0x28, 0x91, 0x02, 0x32, 0xe6, 0x11, 0x91, 0x27, 0x43, 0x6a, 0xe0, 0x51, + 0xea, 0xab, 0x4e, 0x24, 0x26, 0x91, 0xb0, 0xd8, 0x47, 0x48, 0x14, 0x2a, 0x30, 0x63, 0xd9, 0xf4, + 0xb4, 0x49, 0xad, 0x8e, 0xda, 0x9c, 0xe4, 0x6f, 0x35, 0x72, 0x7c, 0x8e, 0xf7, 0x6f, 0x3a, 0x19, + 0x39, 0xbe, 0x56, 0x1b, 0xb8, 0x0d, 0x4b, 0x0e, 0x7d, 0xca, 0x9d, 0xd0, 0x49, 0xda, 0x53, 0xf5, + 0x72, 0x8b, 0x4a, 0x0a, 0x8d, 0xe3, 0x14, 0xb1, 0xfa, 0xdf, 0x53, 0x30, 0x89, 0xb3, 0x27, 0x5f, + 0x6b, 0x30, 0x13, 0xff, 0x22, 0x22, 0xb7, 0x87, 0x7d, 0x86, 0x99, 0x1f, 0x53, 0x95, 0xf5, 0xd7, + 0x17, 0xca, 0x14, 0xd1, 0x6f, 0x7e, 0xf5, 0xdb, 0x5f, 0xdf, 0x8e, 0x57, 0xc9, 0x8a, 0x31, 0xf8, + 0x57, 0x9b, 0xc9, 0x23, 0xe1, 0x1f, 0x34, 0x58, 0x1c, 0x70, 0xa7, 0x91, 0xfa, 0xab, 0x74, 0x06, + 0xdf, 0x91, 0x95, 0xad, 0x91, 0x38, 0xca, 0xe6, 0x06, 0xda, 0x7c, 0x93, 0xac, 0x0f, 0xb3, 0x99, + 0x0c, 0x37, 0xb6, 0xf6, 0xa3, 0x06, 0x24, 0xbf, 0x22, 0xd9, 0xbc, 0xb8, 0x7a, 0x6c, 0xb8, 0x3e, + 0x0a, 0x45, 0xf9, 0xdd, 0x46, 0xbf, 0x35, 0x72, 0xf7, 0x82, 0x7e, 0x8d, 0x67, 0xbc, 0xf5, 0x9c, + 0x7c, 0xaf, 0xc1, 0x42, 0x2e, 0xf6, 0xc9, 0xc6, 0x05, 0xf4, 0x53, 0x97, 0x6b, 0x65, 0x73, 0x04, + 0x86, 0x32, 0xbc, 0x85, 0x86, 0xdf, 0x22, 0x77, 0x5e, 0x67, 0x58, 0x46, 0xbc, 0xf4, 0xfb, 0xab, + 0x06, 0xcb, 0x43, 0xae, 0x29, 0xb2, 0x73, 0x61, 0x0f, 0xa9, 0x3b, 0xb2, 0x72, 0x6f, 0x64, 0x9e, + 0xea, 0x60, 0x1f, 0x3b, 0x78, 0x87, 0xec, 0x5e, 0xac, 0x03, 0xb3, 0xd9, 0x35, 0xa3, 0x0b, 0x17, + 0x3b, 0x31, 0x9e, 0x45, 0x8f, 0xcf, 0xf7, 0xef, 0xbf, 0x38, 0xab, 0x6a, 0x2f, 0xcf, 0xaa, 0xda, + 0x9f, 0x67, 0x55, 0xed, 0x9b, 0xf3, 0xea, 0xd8, 0xcb, 0xf3, 0xea, 0xd8, 0xef, 0xe7, 0xd5, 0xb1, + 0x2f, 0xdf, 0x68, 0x73, 0x71, 0x18, 0x36, 0x6b, 0x96, 0xe7, 0x18, 0x1f, 0x7d, 0x71, 0xf0, 0xe0, + 0x13, 0x26, 
0x4e, 0x3d, 0xbf, 0x63, 0x58, 0x87, 0x94, 0xbb, 0xc6, 0x53, 0x29, 0x27, 0xba, 0x47, + 0x2c, 0x68, 0x4e, 0xe1, 0x3f, 0x3a, 0x5b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x0b, 0xb7, 0x9a, + 0x08, 0x6e, 0x0d, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // TeamInfo queries all important information from the team module + TeamInfo(ctx context.Context, in *QueryTeamInfoRequest, opts ...grpc.CallOption) (*QueryTeamInfoResponse, error) + // TeamVestingAccounts queries all team vesting accounts of the module. + TeamVestingAccounts(ctx context.Context, in *QueryTeamVestingAccountsRequest, opts ...grpc.CallOption) (*QueryTeamVestingAccountsResponse, error) + // TeamVestingAccount queries the team vesting accounts of the module. + TeamVestingAccount(ctx context.Context, in *QueryTeamVestingAccountRequest, opts ...grpc.CallOption) (*QueryTeamVestingAccountResponse, error) + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + TeamVestingStatus(ctx context.Context, in *QueryTeamVestingStatusRequest, opts ...grpc.CallOption) (*QueryTeamVestingStatusResponse, error) + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + TeamVestingStatusByTime(ctx context.Context, in *QueryTeamVestingStatusByTimeRequest, opts ...grpc.CallOption) (*QueryTeamVestingStatusByTimeResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) TeamInfo(ctx context.Context, in *QueryTeamInfoRequest, opts ...grpc.CallOption) (*QueryTeamInfoResponse, error) { + out := new(QueryTeamInfoResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Query/TeamInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TeamVestingAccounts(ctx context.Context, in *QueryTeamVestingAccountsRequest, opts ...grpc.CallOption) (*QueryTeamVestingAccountsResponse, error) { + out := new(QueryTeamVestingAccountsResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Query/TeamVestingAccounts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TeamVestingAccount(ctx context.Context, in *QueryTeamVestingAccountRequest, opts ...grpc.CallOption) (*QueryTeamVestingAccountResponse, error) { + out := new(QueryTeamVestingAccountResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Query/TeamVestingAccount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TeamVestingStatus(ctx context.Context, in *QueryTeamVestingStatusRequest, opts ...grpc.CallOption) (*QueryTeamVestingStatusResponse, error) { + out := new(QueryTeamVestingStatusResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Query/TeamVestingStatus", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) TeamVestingStatusByTime(ctx context.Context, in *QueryTeamVestingStatusByTimeRequest, opts ...grpc.CallOption) (*QueryTeamVestingStatusByTimeResponse, error) { + out := new(QueryTeamVestingStatusByTimeResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Query/TeamVestingStatusByTime", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // TeamInfo queries all important information from the team module + TeamInfo(context.Context, *QueryTeamInfoRequest) (*QueryTeamInfoResponse, error) + // TeamVestingAccounts queries all team vesting accounts of the module. + TeamVestingAccounts(context.Context, *QueryTeamVestingAccountsRequest) (*QueryTeamVestingAccountsResponse, error) + // TeamVestingAccount queries the team vesting accounts of the module. + TeamVestingAccount(context.Context, *QueryTeamVestingAccountRequest) (*QueryTeamVestingAccountResponse, error) + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + TeamVestingStatus(context.Context, *QueryTeamVestingStatusRequest) (*QueryTeamVestingStatusResponse, error) + // TeamCurrentVestingStatus queries the current vesting progress of a team vesting account + TeamVestingStatusByTime(context.Context, *QueryTeamVestingStatusByTimeRequest) (*QueryTeamVestingStatusByTimeResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) TeamInfo(ctx context.Context, req *QueryTeamInfoRequest) (*QueryTeamInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TeamInfo not implemented") +} +func (*UnimplementedQueryServer) TeamVestingAccounts(ctx context.Context, req *QueryTeamVestingAccountsRequest) (*QueryTeamVestingAccountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TeamVestingAccounts not implemented") +} +func (*UnimplementedQueryServer) TeamVestingAccount(ctx context.Context, req *QueryTeamVestingAccountRequest) (*QueryTeamVestingAccountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TeamVestingAccount not implemented") +} +func (*UnimplementedQueryServer) TeamVestingStatus(ctx context.Context, req *QueryTeamVestingStatusRequest) (*QueryTeamVestingStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TeamVestingStatus not implemented") +} +func (*UnimplementedQueryServer) TeamVestingStatusByTime(ctx context.Context, req *QueryTeamVestingStatusByTimeRequest) (*QueryTeamVestingStatusByTimeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TeamVestingStatusByTime not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_TeamInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTeamInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TeamInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Query/TeamInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TeamInfo(ctx, 
req.(*QueryTeamInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TeamVestingAccounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTeamVestingAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TeamVestingAccounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Query/TeamVestingAccounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TeamVestingAccounts(ctx, req.(*QueryTeamVestingAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TeamVestingAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTeamVestingAccountRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TeamVestingAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Query/TeamVestingAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TeamVestingAccount(ctx, req.(*QueryTeamVestingAccountRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TeamVestingStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTeamVestingStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TeamVestingStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Query/TeamVestingStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TeamVestingStatus(ctx, req.(*QueryTeamVestingStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_TeamVestingStatusByTime_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryTeamVestingStatusByTimeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).TeamVestingStatusByTime(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Query/TeamVestingStatusByTime", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).TeamVestingStatusByTime(ctx, req.(*QueryTeamVestingStatusByTimeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.team.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "TeamInfo", + Handler: _Query_TeamInfo_Handler, + }, + { + MethodName: "TeamVestingAccounts", + Handler: _Query_TeamVestingAccounts_Handler, + }, + { + MethodName: "TeamVestingAccount", + Handler: _Query_TeamVestingAccount_Handler, + }, + { + MethodName: "TeamVestingStatus", + Handler: _Query_TeamVestingStatus_Handler, + }, + { + MethodName: "TeamVestingStatusByTime", + Handler: _Query_TeamVestingStatusByTime_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"kyve/team/v1beta1/query.proto", +} + +func (m *QueryTeamInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryTeamInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TeamModuleBalance != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TeamModuleBalance)) + i-- + dAtA[i] = 0x60 + } + if m.RequiredModuleBalance != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RequiredModuleBalance)) + i-- + dAtA[i] = 0x58 + } + if m.AvailableAccountRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.AvailableAccountRewards)) + i-- + dAtA[i] = 0x50 + } + if m.ClaimedAccountRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ClaimedAccountRewards)) + i-- + dAtA[i] = 0x48 + } + if m.TotalAccountRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalAccountRewards)) + i-- + dAtA[i] = 0x40 + } + if m.AvailableAuthorityRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.AvailableAuthorityRewards)) + i-- + dAtA[i] = 0x38 + } + if m.ClaimedAuthorityRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ClaimedAuthorityRewards)) + i-- + dAtA[i] = 0x30 + } + if m.TotalAuthorityRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalAuthorityRewards)) + i-- + dAtA[i] = 0x28 + } + if m.AvailableTeamAllocation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.AvailableTeamAllocation)) + i-- + dAtA[i] = 0x20 + } + if m.IssuedTeamAllocation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.IssuedTeamAllocation)) + i-- + dAtA[i] = 0x18 + } + if m.TotalTeamAllocation != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalTeamAllocation)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingAccountsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingAccountsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingAccountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingAccountsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingAccountsResponse) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingAccountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Accounts) > 0 { + for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingAccountRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingAccountRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingAccountRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingAccountResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingAccountResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingAccountResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Account.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Plan != nil { + { + size, err := m.Plan.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if 
len(m.RequestDate) > 0 { + i -= len(m.RequestDate) + copy(dAtA[i:], m.RequestDate) + i = encodeVarintQuery(dAtA, i, uint64(len(m.RequestDate))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingStatusByTimeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingStatusByTimeRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingStatusByTimeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Time != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Time)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryTeamVestingStatusByTimeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryTeamVestingStatusByTimeResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryTeamVestingStatusByTimeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Plan != nil { + { + size, err := m.Plan.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.RequestDate) > 0 { + i -= len(m.RequestDate) + copy(dAtA[i:], m.RequestDate) + i = encodeVarintQuery(dAtA, i, uint64(len(m.RequestDate))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryVestingStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryVestingStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryVestingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AvailableRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.AvailableRewards)) + i-- + dAtA[i] = 0x48 + } + if m.ClaimedRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ClaimedRewards)) + i-- + dAtA[i] = 0x40 + } + if m.TotalRewards != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalRewards)) + i-- + dAtA[i] = 0x38 + } + if m.ClaimedAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ClaimedAmount)) + i-- + dAtA[i] = 0x30 + } + if m.RemainingUnvestedAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.RemainingUnvestedAmount)) + i-- + dAtA[i] = 0x28 + } + if m.LockedVestedAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.LockedVestedAmount)) + i-- + dAtA[i] = 0x20 + } + if m.CurrentClaimableAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.CurrentClaimableAmount)) + i-- + dAtA[i] = 0x18 + } + if m.TotalUnlockedAmount != 0 { + i = encodeVarintQuery(dAtA, i, 
uint64(m.TotalUnlockedAmount)) + i-- + dAtA[i] = 0x10 + } + if m.TotalVestedAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.TotalVestedAmount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *QueryVestingPlan) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryVestingPlan) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryVestingPlan) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaximumVestingAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.MaximumVestingAmount)) + i-- + dAtA[i] = 0x40 + } + if m.ClawbackAmount != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.ClawbackAmount)) + i-- + dAtA[i] = 0x38 + } + if m.Clawback != 0 { + i = encodeVarintQuery(dAtA, i, uint64(m.Clawback)) + i-- + dAtA[i] = 0x30 + } + if len(m.TokenUnlockFinished) > 0 { + i -= len(m.TokenUnlockFinished) + copy(dAtA[i:], m.TokenUnlockFinished) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TokenUnlockFinished))) + i-- + dAtA[i] = 0x2a + } + if len(m.TokenUnlockStart) > 0 { + i -= len(m.TokenUnlockStart) + copy(dAtA[i:], m.TokenUnlockStart) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TokenUnlockStart))) + i-- + dAtA[i] = 0x22 + } + if len(m.TokenVestingFinished) > 0 { + i -= len(m.TokenVestingFinished) + copy(dAtA[i:], m.TokenVestingFinished) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TokenVestingFinished))) + i-- + dAtA[i] = 0x1a + } + if len(m.TokenVestingStart) > 0 { + i -= len(m.TokenVestingStart) + copy(dAtA[i:], m.TokenVestingStart) + i = encodeVarintQuery(dAtA, i, uint64(len(m.TokenVestingStart))) + i-- + dAtA[i] = 0x12 + } + if len(m.Commencement) > 0 { + i -= len(m.Commencement) + copy(dAtA[i:], m.Commencement) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Commencement))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryTeamInfoRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryTeamInfoResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.TotalTeamAllocation != 0 { + n += 1 + sovQuery(uint64(m.TotalTeamAllocation)) + } + if m.IssuedTeamAllocation != 0 { + n += 1 + sovQuery(uint64(m.IssuedTeamAllocation)) + } + if m.AvailableTeamAllocation != 0 { + n += 1 + sovQuery(uint64(m.AvailableTeamAllocation)) + } + if m.TotalAuthorityRewards != 0 { + n += 1 + sovQuery(uint64(m.TotalAuthorityRewards)) + } + if m.ClaimedAuthorityRewards != 0 { + n += 1 + sovQuery(uint64(m.ClaimedAuthorityRewards)) + } + if m.AvailableAuthorityRewards != 0 { + n += 1 + sovQuery(uint64(m.AvailableAuthorityRewards)) + } + if m.TotalAccountRewards != 0 { + n += 1 + sovQuery(uint64(m.TotalAccountRewards)) + } + if m.ClaimedAccountRewards != 0 { + n += 1 + sovQuery(uint64(m.ClaimedAccountRewards)) + } + if m.AvailableAccountRewards != 0 { + n += 1 + sovQuery(uint64(m.AvailableAccountRewards)) + } + if m.RequiredModuleBalance != 0 { + n += 1 + sovQuery(uint64(m.RequiredModuleBalance)) + } + if 
m.TeamModuleBalance != 0 { + n += 1 + sovQuery(uint64(m.TeamModuleBalance)) + } + return n +} + +func (m *QueryTeamVestingAccountsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryTeamVestingAccountsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Accounts) > 0 { + for _, e := range m.Accounts { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryTeamVestingAccountRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + return n +} + +func (m *QueryTeamVestingAccountResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Account.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryTeamVestingStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + return n +} + +func (m *QueryTeamVestingStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RequestDate) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryTeamVestingStatusByTimeRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovQuery(uint64(m.Id)) + } + if m.Time != 0 { + n += 1 + sovQuery(uint64(m.Time)) + } + return n +} + +func (m *QueryTeamVestingStatusByTimeResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RequestDate) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Plan != nil { + l = m.Plan.Size() + n += 1 + l + sovQuery(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryVestingStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TotalVestedAmount != 0 { + n += 1 + sovQuery(uint64(m.TotalVestedAmount)) + } + if m.TotalUnlockedAmount != 0 { + n += 1 + sovQuery(uint64(m.TotalUnlockedAmount)) + } + if m.CurrentClaimableAmount != 0 { + n += 1 + sovQuery(uint64(m.CurrentClaimableAmount)) + } + if m.LockedVestedAmount != 0 { + n += 1 + sovQuery(uint64(m.LockedVestedAmount)) + } + if m.RemainingUnvestedAmount != 0 { + n += 1 + sovQuery(uint64(m.RemainingUnvestedAmount)) + } + if m.ClaimedAmount != 0 { + n += 1 + sovQuery(uint64(m.ClaimedAmount)) + } + if m.TotalRewards != 0 { + n += 1 + sovQuery(uint64(m.TotalRewards)) + } + if m.ClaimedRewards != 0 { + n += 1 + sovQuery(uint64(m.ClaimedRewards)) + } + if m.AvailableRewards != 0 { + n += 1 + sovQuery(uint64(m.AvailableRewards)) + } + return n +} + +func (m *QueryVestingPlan) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commencement) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.TokenVestingStart) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.TokenVestingFinished) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.TokenUnlockStart) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.TokenUnlockFinished) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Clawback != 0 { + n += 1 + sovQuery(uint64(m.Clawback)) + } + if m.ClawbackAmount != 0 { + n += 1 + sovQuery(uint64(m.ClawbackAmount)) + } + if 
m.MaximumVestingAmount != 0 { + n += 1 + sovQuery(uint64(m.MaximumVestingAmount)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryTeamInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalTeamAllocation", wireType) + } + m.TotalTeamAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalTeamAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedTeamAllocation", wireType) + } + m.IssuedTeamAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IssuedTeamAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableTeamAllocation", wireType) + } + m.AvailableTeamAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableTeamAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalAuthorityRewards", wireType) + } + m.TotalAuthorityRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalAuthorityRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimedAuthorityRewards", wireType) + } + m.ClaimedAuthorityRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClaimedAuthorityRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableAuthorityRewards", wireType) + } + m.AvailableAuthorityRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableAuthorityRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalAccountRewards", wireType) + } + m.TotalAccountRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalAccountRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimedAccountRewards", wireType) + } + m.ClaimedAccountRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClaimedAccountRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableAccountRewards", wireType) + } + m.AvailableAccountRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableAccountRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredModuleBalance", wireType) + } + m.RequiredModuleBalance = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequiredModuleBalance |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TeamModuleBalance", wireType) + } + m.TeamModuleBalance = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TeamModuleBalance |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingAccountsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingAccountsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingAccountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingAccountsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingAccountsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingAccountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Accounts = append(m.Accounts, TeamVestingAccount{}) + if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingAccountRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingAccountRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingAccountRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingAccountResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingAccountResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingAccountResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Account", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Account.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingStatusRequest: wiretype end group for non-group") + } + 
if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestDate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &QueryVestingPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == 
nil { + m.Status = &QueryVestingStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingStatusByTimeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingStatusByTimeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingStatusByTimeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + m.Time = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Time |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryTeamVestingStatusByTimeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryTeamVestingStatusByTimeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryTeamVestingStatusByTimeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestDate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestDate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Plan == nil { + m.Plan = &QueryVestingPlan{} + } + if err := m.Plan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &QueryVestingStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryVestingStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryVestingStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryVestingStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalVestedAmount", wireType) + } + m.TotalVestedAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalVestedAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalUnlockedAmount", wireType) + } + m.TotalUnlockedAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalUnlockedAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
CurrentClaimableAmount", wireType) + } + m.CurrentClaimableAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentClaimableAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockedVestedAmount", wireType) + } + m.LockedVestedAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LockedVestedAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingUnvestedAmount", wireType) + } + m.RemainingUnvestedAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RemainingUnvestedAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimedAmount", wireType) + } + m.ClaimedAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClaimedAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRewards", wireType) + } + m.TotalRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimedRewards", wireType) + } + m.ClaimedRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClaimedRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableRewards", wireType) + } + m.AvailableRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryVestingPlan) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
QueryVestingPlan: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryVestingPlan: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commencement", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commencement = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenVestingStart", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenVestingStart = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenVestingFinished", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenVestingFinished = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenUnlockStart", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenUnlockStart = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TokenUnlockFinished", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TokenUnlockFinished = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Clawback", wireType) + } + m.Clawback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Clawback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClawbackAmount", wireType) + } + m.ClawbackAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClawbackAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaximumVestingAmount", wireType) + } + m.MaximumVestingAmount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaximumVestingAmount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/team/types/query.pb.gw.go b/x/team/types/query.pb.gw.go new file mode 100644 index 00000000..8ecf3778 --- /dev/null +++ b/x/team/types/query.pb.gw.go @@ -0,0 +1,543 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
+// source: kyve/team/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_TeamInfo_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamInfoRequest + var metadata runtime.ServerMetadata + + msg, err := client.TeamInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TeamInfo_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamInfoRequest + var metadata runtime.ServerMetadata + + msg, err := server.TeamInfo(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TeamVestingAccounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingAccountsRequest + var metadata runtime.ServerMetadata + + msg, err := client.TeamVestingAccounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TeamVestingAccounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingAccountsRequest + var metadata runtime.ServerMetadata + + msg, err := server.TeamVestingAccounts(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TeamVestingAccount_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingAccountRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.TeamVestingAccount(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TeamVestingAccount_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingAccountRequest + var metadata runtime.ServerMetadata + + var ( + val string + 
ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.TeamVestingAccount(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TeamVestingStatus_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := client.TeamVestingStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TeamVestingStatus_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingStatusRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + msg, err := server.TeamVestingStatus(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_TeamVestingStatusByTime_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingStatusByTimeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + val, ok = pathParams["time"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "time") + } + + protoReq.Time, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "time", err) + } + + msg, err := client.TeamVestingStatusByTime(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_TeamVestingStatusByTime_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryTeamVestingStatusByTimeRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok 
bool + err error + _ = err + ) + + val, ok = pathParams["id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id") + } + + protoReq.Id, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err) + } + + val, ok = pathParams["time"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "time") + } + + protoReq.Time, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "time", err) + } + + msg, err := server.TeamVestingStatusByTime(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_TeamInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TeamInfo_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingAccounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TeamVestingAccounts_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingAccounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TeamVestingAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TeamVestingAccount_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TeamVestingStatus_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingStatusByTime_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_TeamVestingStatusByTime_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingStatusByTime_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_TeamInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TeamInfo_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingAccounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TeamVestingAccounts_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingAccounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_TeamVestingAccount_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TeamVestingAccount_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingAccount_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TeamVestingStatus_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingStatus_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_TeamVestingStatusByTime_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_TeamVestingStatusByTime_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_TeamVestingStatusByTime_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_TeamInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "team", "v1beta1", "team_info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_TeamVestingAccounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"kyve", "team", "v1beta1", "team_vesting_accounts"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_TeamVestingAccount_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "team", "v1beta1", "team_vesting_account", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_TeamVestingStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"kyve", "team", "v1beta1", "team_vesting_status", "id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_TeamVestingStatusByTime_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"kyve", "team", "v1beta1", "team_vesting_status_by_time", "id", "time"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_TeamInfo_0 = runtime.ForwardResponseMessage + + forward_Query_TeamVestingAccounts_0 = runtime.ForwardResponseMessage + + forward_Query_TeamVestingAccount_0 = runtime.ForwardResponseMessage + + forward_Query_TeamVestingStatus_0 = runtime.ForwardResponseMessage + + forward_Query_TeamVestingStatusByTime_0 = runtime.ForwardResponseMessage +) diff --git a/x/team/types/team.pb.go b/x/team/types/team.pb.go new file mode 100644 index 00000000..e2b7a217 --- /dev/null +++ b/x/team/types/team.pb.go @@ -0,0 +1,757 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/team/v1beta1/team.proto + +package types + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Authority ... 
+type Authority struct { + // total inflation rewards is the total amount of rewards the authority has ever received + TotalRewards uint64 `protobuf:"varint,1,opt,name=total_rewards,json=totalRewards,proto3" json:"total_rewards,omitempty"` + // claimed is the amount of inflation rewards claimed by the authority + RewardsClaimed uint64 `protobuf:"varint,2,opt,name=rewards_claimed,json=rewardsClaimed,proto3" json:"rewards_claimed,omitempty"` +} + +func (m *Authority) Reset() { *m = Authority{} } +func (m *Authority) String() string { return proto.CompactTextString(m) } +func (*Authority) ProtoMessage() {} +func (*Authority) Descriptor() ([]byte, []int) { + return fileDescriptor_a9a907d008be83cf, []int{0} +} +func (m *Authority) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Authority) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Authority.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Authority) XXX_Merge(src proto.Message) { + xxx_messageInfo_Authority.Merge(m, src) +} +func (m *Authority) XXX_Size() int { + return m.Size() +} +func (m *Authority) XXX_DiscardUnknown() { + xxx_messageInfo_Authority.DiscardUnknown(m) +} + +var xxx_messageInfo_Authority proto.InternalMessageInfo + +func (m *Authority) GetTotalRewards() uint64 { + if m != nil { + return m.TotalRewards + } + return 0 +} + +func (m *Authority) GetRewardsClaimed() uint64 { + if m != nil { + return m.RewardsClaimed + } + return 0 +} + +// TeamVestingAccount ... +type TeamVestingAccount struct { + // id is a unique identifier for each vesting account, tied to a single team member. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // total_allocation is the number of tokens reserved for this team member. + TotalAllocation uint64 `protobuf:"varint,2,opt,name=total_allocation,json=totalAllocation,proto3" json:"total_allocation,omitempty"` + // commencement is the unix timestamp of the member's official start date in seconds + Commencement uint64 `protobuf:"varint,3,opt,name=commencement,proto3" json:"commencement,omitempty"` + // clawback is a unix timestamp of a clawback in seconds.
If timestamp is zero + // it means that the account has not received a clawback + Clawback uint64 `protobuf:"varint,4,opt,name=clawback,proto3" json:"clawback,omitempty"` + // unlocked_claimed is the amount of $KYVE already claimed by the account holder + UnlockedClaimed uint64 `protobuf:"varint,5,opt,name=unlocked_claimed,json=unlockedClaimed,proto3" json:"unlocked_claimed,omitempty"` + // the last time the unlocked amount was claimed + LastClaimedTime uint64 `protobuf:"varint,6,opt,name=last_claimed_time,json=lastClaimedTime,proto3" json:"last_claimed_time,omitempty"` + // total rewards is the total amount of rewards the account has received ever + TotalRewards uint64 `protobuf:"varint,7,opt,name=total_rewards,json=totalRewards,proto3" json:"total_rewards,omitempty"` + // rewards claimed is the amount inflation rewards claimed by account holder + RewardsClaimed uint64 `protobuf:"varint,8,opt,name=rewards_claimed,json=rewardsClaimed,proto3" json:"rewards_claimed,omitempty"` +} + +func (m *TeamVestingAccount) Reset() { *m = TeamVestingAccount{} } +func (m *TeamVestingAccount) String() string { return proto.CompactTextString(m) } +func (*TeamVestingAccount) ProtoMessage() {} +func (*TeamVestingAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_a9a907d008be83cf, []int{1} +} +func (m *TeamVestingAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TeamVestingAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TeamVestingAccount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TeamVestingAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_TeamVestingAccount.Merge(m, src) +} +func (m *TeamVestingAccount) XXX_Size() int { + return m.Size() +} +func (m *TeamVestingAccount) XXX_DiscardUnknown() { + xxx_messageInfo_TeamVestingAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_TeamVestingAccount proto.InternalMessageInfo + +func (m *TeamVestingAccount) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *TeamVestingAccount) GetTotalAllocation() uint64 { + if m != nil { + return m.TotalAllocation + } + return 0 +} + +func (m *TeamVestingAccount) GetCommencement() uint64 { + if m != nil { + return m.Commencement + } + return 0 +} + +func (m *TeamVestingAccount) GetClawback() uint64 { + if m != nil { + return m.Clawback + } + return 0 +} + +func (m *TeamVestingAccount) GetUnlockedClaimed() uint64 { + if m != nil { + return m.UnlockedClaimed + } + return 0 +} + +func (m *TeamVestingAccount) GetLastClaimedTime() uint64 { + if m != nil { + return m.LastClaimedTime + } + return 0 +} + +func (m *TeamVestingAccount) GetTotalRewards() uint64 { + if m != nil { + return m.TotalRewards + } + return 0 +} + +func (m *TeamVestingAccount) GetRewardsClaimed() uint64 { + if m != nil { + return m.RewardsClaimed + } + return 0 +} + +func init() { + proto.RegisterType((*Authority)(nil), "kyve.team.v1beta1.Authority") + proto.RegisterType((*TeamVestingAccount)(nil), "kyve.team.v1beta1.TeamVestingAccount") +} + +func init() { proto.RegisterFile("kyve/team/v1beta1/team.proto", fileDescriptor_a9a907d008be83cf) } + +var fileDescriptor_a9a907d008be83cf = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xc1, 0x4e, 0xc2, 0x40, + 0x10, 0x86, 0x69, 0x45, 0xc4, 0x0d, 0x82, 0xec, 0xa9, 0x31, 0xa6, 
0x31, 0x78, 0x50, 0x3c, 0xd0, + 0x10, 0x9f, 0x00, 0x89, 0x27, 0x13, 0x0f, 0x84, 0x90, 0xe0, 0x85, 0x6c, 0xb7, 0x13, 0xd8, 0xb4, + 0xdb, 0x25, 0xed, 0x14, 0xe4, 0x2d, 0x7c, 0x18, 0x1f, 0xc2, 0x23, 0x47, 0x8f, 0x06, 0x5e, 0xc4, + 0xb0, 0xbb, 0x10, 0x13, 0x3d, 0x78, 0x9c, 0xef, 0xff, 0x33, 0xf3, 0xe7, 0x1f, 0x72, 0x19, 0xaf, + 0x16, 0x10, 0x20, 0x30, 0x19, 0x2c, 0xba, 0x21, 0x20, 0xeb, 0xea, 0xa1, 0x33, 0xcf, 0x14, 0x2a, + 0xda, 0xdc, 0xa9, 0x1d, 0x0d, 0xac, 0xda, 0x1a, 0x93, 0xd3, 0x5e, 0x81, 0x33, 0x95, 0x09, 0x5c, + 0xd1, 0x6b, 0x72, 0x86, 0x0a, 0x59, 0x32, 0xc9, 0x60, 0xc9, 0xb2, 0x28, 0xf7, 0x9c, 0x2b, 0xe7, + 0xb6, 0x3c, 0xa8, 0x69, 0x38, 0x30, 0x8c, 0xde, 0x90, 0x86, 0x95, 0x27, 0x3c, 0x61, 0x42, 0x42, + 0xe4, 0xb9, 0xda, 0x56, 0xb7, 0xb8, 0x6f, 0x68, 0xeb, 0xdd, 0x25, 0x74, 0x08, 0x4c, 0x8e, 0x20, + 0x47, 0x91, 0x4e, 0x7b, 0x9c, 0xab, 0x22, 0x45, 0x5a, 0x27, 0xae, 0x88, 0xec, 0x66, 0x57, 0x44, + 0xb4, 0x4d, 0xce, 0xcd, 0x51, 0x96, 0x24, 0x8a, 0x33, 0x14, 0x2a, 0xb5, 0x0b, 0x1b, 0x9a, 0xf7, + 0x0e, 0x98, 0xb6, 0x48, 0x8d, 0x2b, 0x29, 0x21, 0xe5, 0x20, 0x21, 0x45, 0xef, 0xc8, 0xc4, 0xfb, + 0xc9, 0xe8, 0x05, 0xa9, 0xf2, 0x84, 0x2d, 0x43, 0xc6, 0x63, 0xaf, 0xac, 0xf5, 0xc3, 0xbc, 0x3b, + 0x55, 0xa4, 0x89, 0xe2, 0x31, 0x44, 0x87, 0xec, 0xc7, 0xe6, 0xd4, 0x9e, 0xdb, 0xf0, 0xf4, 0x8e, + 0x34, 0x13, 0x96, 0xe3, 0xde, 0x36, 0x41, 0x21, 0xc1, 0xab, 0x18, 0xef, 0x4e, 0xb0, 0xbe, 0xa1, + 0x90, 0xf0, 0xbb, 0xb6, 0x93, 0xff, 0xd5, 0x56, 0xfd, 0xab, 0xb6, 0x87, 0xfe, 0xc7, 0xc6, 0x77, + 0xd6, 0x1b, 0xdf, 0xf9, 0xda, 0xf8, 0xce, 0xdb, 0xd6, 0x2f, 0xad, 0xb7, 0x7e, 0xe9, 0x73, 0xeb, + 0x97, 0x5e, 0xda, 0x53, 0x81, 0xb3, 0x22, 0xec, 0x70, 0x25, 0x83, 0xa7, 0xf1, 0xe8, 0xf1, 0x19, + 0x70, 0xa9, 0xb2, 0x38, 0xe0, 0x33, 0x26, 0xd2, 0xe0, 0xd5, 0xbc, 0x1d, 0x57, 0x73, 0xc8, 0xc3, + 0x8a, 0x7e, 0xf8, 0xfd, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xc5, 0x6b, 0x77, 0x10, 0x02, + 0x00, 0x00, +} + +func (m *Authority) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Authority) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Authority) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RewardsClaimed != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.RewardsClaimed)) + i-- + dAtA[i] = 0x10 + } + if m.TotalRewards != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.TotalRewards)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TeamVestingAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TeamVestingAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TeamVestingAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RewardsClaimed != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.RewardsClaimed)) + i-- + dAtA[i] = 0x40 + } + if m.TotalRewards != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.TotalRewards)) + i-- + dAtA[i] = 0x38 + } + if m.LastClaimedTime != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.LastClaimedTime)) + i-- + dAtA[i] = 0x30 + } + if m.UnlockedClaimed != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.UnlockedClaimed)) + 
i-- + dAtA[i] = 0x28 + } + if m.Clawback != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.Clawback)) + i-- + dAtA[i] = 0x20 + } + if m.Commencement != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.Commencement)) + i-- + dAtA[i] = 0x18 + } + if m.TotalAllocation != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.TotalAllocation)) + i-- + dAtA[i] = 0x10 + } + if m.Id != 0 { + i = encodeVarintTeam(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTeam(dAtA []byte, offset int, v uint64) int { + offset -= sovTeam(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Authority) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TotalRewards != 0 { + n += 1 + sovTeam(uint64(m.TotalRewards)) + } + if m.RewardsClaimed != 0 { + n += 1 + sovTeam(uint64(m.RewardsClaimed)) + } + return n +} + +func (m *TeamVestingAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Id != 0 { + n += 1 + sovTeam(uint64(m.Id)) + } + if m.TotalAllocation != 0 { + n += 1 + sovTeam(uint64(m.TotalAllocation)) + } + if m.Commencement != 0 { + n += 1 + sovTeam(uint64(m.Commencement)) + } + if m.Clawback != 0 { + n += 1 + sovTeam(uint64(m.Clawback)) + } + if m.UnlockedClaimed != 0 { + n += 1 + sovTeam(uint64(m.UnlockedClaimed)) + } + if m.LastClaimedTime != 0 { + n += 1 + sovTeam(uint64(m.LastClaimedTime)) + } + if m.TotalRewards != 0 { + n += 1 + sovTeam(uint64(m.TotalRewards)) + } + if m.RewardsClaimed != 0 { + n += 1 + sovTeam(uint64(m.RewardsClaimed)) + } + return n +} + +func sovTeam(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTeam(x uint64) (n int) { + return sovTeam(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Authority) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Authority: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Authority: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRewards", wireType) + } + m.TotalRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardsClaimed", wireType) + } + m.RewardsClaimed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardsClaimed |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTeam(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTeam + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TeamVestingAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TeamVestingAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TeamVestingAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalAllocation", wireType) + } + m.TotalAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commencement", wireType) + } + m.Commencement = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commencement |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Clawback", wireType) + } + m.Clawback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Clawback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockedClaimed", wireType) + } + m.UnlockedClaimed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnlockedClaimed |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastClaimedTime", wireType) + } + m.LastClaimedTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastClaimedTime |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalRewards", wireType) + } + m.TotalRewards = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalRewards |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RewardsClaimed", wireType) + } 
+ m.RewardsClaimed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTeam + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RewardsClaimed |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTeam(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTeam + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTeam(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTeam + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTeam + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTeam + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTeam + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTeam + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTeam + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTeam = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTeam = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTeam = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/team/types/tx.pb.go b/x/team/types/tx.pb.go new file mode 100644 index 00000000..ff1e7f87 --- /dev/null +++ b/x/team/types/tx.pb.go @@ -0,0 +1,2348 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: kyve/team/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgClaimUnlockedTokens ... 
+type MsgClaimUnlocked struct { + // authority is the foundation which is allowed to payout unlocked tokens + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id is the unique identifier of the team member + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // amount of $KYVE that will be paid to the recipient and marked as deducted from the unlocked amount. + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the recipient address chosen by the team member. + Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *MsgClaimUnlocked) Reset() { *m = MsgClaimUnlocked{} } +func (m *MsgClaimUnlocked) String() string { return proto.CompactTextString(m) } +func (*MsgClaimUnlocked) ProtoMessage() {} +func (*MsgClaimUnlocked) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{0} +} +func (m *MsgClaimUnlocked) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimUnlocked) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimUnlocked.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimUnlocked) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimUnlocked.Merge(m, src) +} +func (m *MsgClaimUnlocked) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimUnlocked) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimUnlocked.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimUnlocked proto.InternalMessageInfo + +func (m *MsgClaimUnlocked) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgClaimUnlocked) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgClaimUnlocked) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *MsgClaimUnlocked) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +// MsgClaimUnlockedResponse defines the Msg/ClaimUnlockedTokens response type. +type MsgClaimUnlockedResponse struct { +} + +func (m *MsgClaimUnlockedResponse) Reset() { *m = MsgClaimUnlockedResponse{} } +func (m *MsgClaimUnlockedResponse) String() string { return proto.CompactTextString(m) } +func (*MsgClaimUnlockedResponse) ProtoMessage() {} +func (*MsgClaimUnlockedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{1} +} +func (m *MsgClaimUnlockedResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimUnlockedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimUnlockedResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimUnlockedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimUnlockedResponse.Merge(m, src) +} +func (m *MsgClaimUnlockedResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimUnlockedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimUnlockedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimUnlockedResponse proto.InternalMessageInfo + +// MsgClaimAuthorityRewards ... 
+type MsgClaimAuthorityRewards struct { + // authority is the foundation which is allowed to payout unlocked tokens + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // amount of $KYVE that will be paid to the recipient and marked as deducted from the authority inflation rewards + Amount uint64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the recipient address chosen by the team member. + Recipient string `protobuf:"bytes,3,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *MsgClaimAuthorityRewards) Reset() { *m = MsgClaimAuthorityRewards{} } +func (m *MsgClaimAuthorityRewards) String() string { return proto.CompactTextString(m) } +func (*MsgClaimAuthorityRewards) ProtoMessage() {} +func (*MsgClaimAuthorityRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{2} +} +func (m *MsgClaimAuthorityRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimAuthorityRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimAuthorityRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimAuthorityRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimAuthorityRewards.Merge(m, src) +} +func (m *MsgClaimAuthorityRewards) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimAuthorityRewards) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimAuthorityRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimAuthorityRewards proto.InternalMessageInfo + +func (m *MsgClaimAuthorityRewards) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgClaimAuthorityRewards) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *MsgClaimAuthorityRewards) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +// MsgClaimAuthorityRewardsResponse defines the Msg/ClaimAuthorityRewards response type. +type MsgClaimAuthorityRewardsResponse struct { +} + +func (m *MsgClaimAuthorityRewardsResponse) Reset() { *m = MsgClaimAuthorityRewardsResponse{} } +func (m *MsgClaimAuthorityRewardsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgClaimAuthorityRewardsResponse) ProtoMessage() {} +func (*MsgClaimAuthorityRewardsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{3} +} +func (m *MsgClaimAuthorityRewardsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimAuthorityRewardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimAuthorityRewardsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimAuthorityRewardsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimAuthorityRewardsResponse.Merge(m, src) +} +func (m *MsgClaimAuthorityRewardsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimAuthorityRewardsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimAuthorityRewardsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimAuthorityRewardsResponse proto.InternalMessageInfo + +// MsgClaimAccountRewards ... 
+type MsgClaimAccountRewards struct { + // authority is the foundation which is allowed to payout unlocked tokens + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id is the unique identifier of the team member + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // amount of $KYVE that will be paid to the recipient and marked as deducted from the inflation rewards + Amount uint64 `protobuf:"varint,3,opt,name=amount,proto3" json:"amount,omitempty"` + // recipient is the recipient address chosen by the team member. + Recipient string `protobuf:"bytes,4,opt,name=recipient,proto3" json:"recipient,omitempty"` +} + +func (m *MsgClaimAccountRewards) Reset() { *m = MsgClaimAccountRewards{} } +func (m *MsgClaimAccountRewards) String() string { return proto.CompactTextString(m) } +func (*MsgClaimAccountRewards) ProtoMessage() {} +func (*MsgClaimAccountRewards) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{4} +} +func (m *MsgClaimAccountRewards) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimAccountRewards) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimAccountRewards.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimAccountRewards) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimAccountRewards.Merge(m, src) +} +func (m *MsgClaimAccountRewards) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimAccountRewards) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimAccountRewards.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimAccountRewards proto.InternalMessageInfo + +func (m *MsgClaimAccountRewards) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgClaimAccountRewards) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgClaimAccountRewards) GetAmount() uint64 { + if m != nil { + return m.Amount + } + return 0 +} + +func (m *MsgClaimAccountRewards) GetRecipient() string { + if m != nil { + return m.Recipient + } + return "" +} + +// MsgClaimAccountRewardsResponse defines the Msg/ClaimAccountRewards response type. 
+type MsgClaimAccountRewardsResponse struct { +} + +func (m *MsgClaimAccountRewardsResponse) Reset() { *m = MsgClaimAccountRewardsResponse{} } +func (m *MsgClaimAccountRewardsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgClaimAccountRewardsResponse) ProtoMessage() {} +func (*MsgClaimAccountRewardsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{5} +} +func (m *MsgClaimAccountRewardsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClaimAccountRewardsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClaimAccountRewardsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClaimAccountRewardsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClaimAccountRewardsResponse.Merge(m, src) +} +func (m *MsgClaimAccountRewardsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgClaimAccountRewardsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClaimAccountRewardsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClaimAccountRewardsResponse proto.InternalMessageInfo + +// MsgClawback ... +type MsgClawback struct { + // authority is the foundation which is allowed to modify team accounts + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // id is the unique identifier of the team member + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // clawback is a unix timestamp (in seconds) of when the clawback should be applied + Clawback uint64 `protobuf:"varint,3,opt,name=clawback,proto3" json:"clawback,omitempty"` +} + +func (m *MsgClawback) Reset() { *m = MsgClawback{} } +func (m *MsgClawback) String() string { return proto.CompactTextString(m) } +func (*MsgClawback) ProtoMessage() {} +func (*MsgClawback) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{6} +} +func (m *MsgClawback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClawback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClawback.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClawback) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClawback.Merge(m, src) +} +func (m *MsgClawback) XXX_Size() int { + return m.Size() +} +func (m *MsgClawback) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClawback.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClawback proto.InternalMessageInfo + +func (m *MsgClawback) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgClawback) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *MsgClawback) GetClawback() uint64 { + if m != nil { + return m.Clawback + } + return 0 +} + +// MsgClawbackResponse defines the Msg/Clawback response type. 
+type MsgClawbackResponse struct { +} + +func (m *MsgClawbackResponse) Reset() { *m = MsgClawbackResponse{} } +func (m *MsgClawbackResponse) String() string { return proto.CompactTextString(m) } +func (*MsgClawbackResponse) ProtoMessage() {} +func (*MsgClawbackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{7} +} +func (m *MsgClawbackResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgClawbackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgClawbackResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgClawbackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgClawbackResponse.Merge(m, src) +} +func (m *MsgClawbackResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgClawbackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgClawbackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgClawbackResponse proto.InternalMessageInfo + +// MsgCreateTeamVestingAccount ... +type MsgCreateTeamVestingAccount struct { + // authority ... + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // total_allocation is the number of tokens reserved for this team member. + TotalAllocation uint64 `protobuf:"varint,2,opt,name=total_allocation,json=totalAllocation,proto3" json:"total_allocation,omitempty"` + // commencement is the unix timestamp of the member's official start date. + Commencement uint64 `protobuf:"varint,3,opt,name=commencement,proto3" json:"commencement,omitempty"` +} + +func (m *MsgCreateTeamVestingAccount) Reset() { *m = MsgCreateTeamVestingAccount{} } +func (m *MsgCreateTeamVestingAccount) String() string { return proto.CompactTextString(m) } +func (*MsgCreateTeamVestingAccount) ProtoMessage() {} +func (*MsgCreateTeamVestingAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{8} +} +func (m *MsgCreateTeamVestingAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateTeamVestingAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateTeamVestingAccount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateTeamVestingAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateTeamVestingAccount.Merge(m, src) +} +func (m *MsgCreateTeamVestingAccount) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateTeamVestingAccount) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateTeamVestingAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateTeamVestingAccount proto.InternalMessageInfo + +func (m *MsgCreateTeamVestingAccount) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgCreateTeamVestingAccount) GetTotalAllocation() uint64 { + if m != nil { + return m.TotalAllocation + } + return 0 +} + +func (m *MsgCreateTeamVestingAccount) GetCommencement() uint64 { + if m != nil { + return m.Commencement + } + return 0 +} + +// MsgCreateTeamVestingAccountResponse defines the Msg/CreateTeamVestingAccount response type. 
+type MsgCreateTeamVestingAccountResponse struct { +} + +func (m *MsgCreateTeamVestingAccountResponse) Reset() { *m = MsgCreateTeamVestingAccountResponse{} } +func (m *MsgCreateTeamVestingAccountResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateTeamVestingAccountResponse) ProtoMessage() {} +func (*MsgCreateTeamVestingAccountResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1ad042ec4c659ded, []int{9} +} +func (m *MsgCreateTeamVestingAccountResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateTeamVestingAccountResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateTeamVestingAccountResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateTeamVestingAccountResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateTeamVestingAccountResponse.Merge(m, src) +} +func (m *MsgCreateTeamVestingAccountResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateTeamVestingAccountResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateTeamVestingAccountResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateTeamVestingAccountResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgClaimUnlocked)(nil), "kyve.team.v1beta1.MsgClaimUnlocked") + proto.RegisterType((*MsgClaimUnlockedResponse)(nil), "kyve.team.v1beta1.MsgClaimUnlockedResponse") + proto.RegisterType((*MsgClaimAuthorityRewards)(nil), "kyve.team.v1beta1.MsgClaimAuthorityRewards") + proto.RegisterType((*MsgClaimAuthorityRewardsResponse)(nil), "kyve.team.v1beta1.MsgClaimAuthorityRewardsResponse") + proto.RegisterType((*MsgClaimAccountRewards)(nil), "kyve.team.v1beta1.MsgClaimAccountRewards") + proto.RegisterType((*MsgClaimAccountRewardsResponse)(nil), "kyve.team.v1beta1.MsgClaimAccountRewardsResponse") + proto.RegisterType((*MsgClawback)(nil), "kyve.team.v1beta1.MsgClawback") + proto.RegisterType((*MsgClawbackResponse)(nil), "kyve.team.v1beta1.MsgClawbackResponse") + proto.RegisterType((*MsgCreateTeamVestingAccount)(nil), "kyve.team.v1beta1.MsgCreateTeamVestingAccount") + proto.RegisterType((*MsgCreateTeamVestingAccountResponse)(nil), "kyve.team.v1beta1.MsgCreateTeamVestingAccountResponse") +} + +func init() { proto.RegisterFile("kyve/team/v1beta1/tx.proto", fileDescriptor_1ad042ec4c659ded) } + +var fileDescriptor_1ad042ec4c659ded = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcf, 0x6e, 0xd3, 0x40, + 0x10, 0xc6, 0xb3, 0x49, 0x54, 0xb5, 0xc3, 0xbf, 0xe2, 0xd2, 0xca, 0x18, 0xc9, 0x8a, 0x5c, 0x81, + 0x1a, 0x21, 0x6c, 0x85, 0x4a, 0xbd, 0xa7, 0x15, 0x27, 0x54, 0x0e, 0x01, 0x2a, 0xc1, 0xa5, 0xda, + 0xac, 0x47, 0xc9, 0x2a, 0xb6, 0x37, 0x78, 0x37, 0x4d, 0x73, 0xe5, 0x09, 0x78, 0x06, 0xc4, 0x89, + 0x13, 0x48, 0x3c, 0x04, 0xc7, 0x8a, 0x13, 0x47, 0x94, 0xbc, 0x08, 0xb2, 0xe3, 0x6c, 0x48, 0x88, + 0x43, 0x5a, 0xe0, 0xb8, 0x33, 0x3f, 0xcf, 0x7c, 0x9f, 0x47, 0xb3, 0x0b, 0x56, 0x67, 0x70, 0x86, + 0x9e, 0x42, 0x1a, 0x7a, 0x67, 0xb5, 0x26, 0x2a, 0x5a, 0xf3, 0xd4, 0xb9, 0xdb, 0x8d, 0x85, 0x12, + 0xc6, 0xed, 0x24, 0xe7, 0x26, 0x39, 0x37, 0xcb, 0x59, 0x77, 0x99, 0x90, 0xa1, 0x90, 0xa7, 0x29, + 0xe0, 0x8d, 0x0f, 0x63, 0xda, 0xf9, 0x48, 0x60, 0xf3, 0x58, 0xb6, 0x8e, 0x02, 0xca, 0xc3, 0x97, + 0x51, 0x20, 0x58, 0x07, 0x7d, 0xe3, 0x00, 0x36, 0x68, 0x4f, 0xb5, 
0x45, 0xcc, 0xd5, 0xc0, 0x24, + 0x15, 0xb2, 0xb7, 0x71, 0x68, 0x7e, 0xfb, 0xf2, 0xe8, 0x4e, 0xf6, 0x65, 0xdd, 0xf7, 0x63, 0x94, + 0xf2, 0xb9, 0x8a, 0x79, 0xd4, 0x6a, 0x4c, 0x51, 0xe3, 0x26, 0x14, 0xb9, 0x6f, 0x16, 0x2b, 0x64, + 0xaf, 0xdc, 0x28, 0x72, 0xdf, 0xd8, 0x81, 0x35, 0x1a, 0x8a, 0x5e, 0xa4, 0xcc, 0x52, 0x1a, 0xcb, + 0x4e, 0x49, 0xfd, 0x18, 0x19, 0xef, 0x72, 0x8c, 0x94, 0x59, 0xfe, 0x53, 0x7d, 0x8d, 0x3a, 0x16, + 0x98, 0xf3, 0x5a, 0x1b, 0x28, 0xbb, 0x22, 0x92, 0xe8, 0xbc, 0x27, 0xd3, 0x64, 0x7d, 0xa2, 0xa8, + 0x81, 0x7d, 0x1a, 0xfb, 0xf2, 0xca, 0x86, 0xa6, 0x06, 0x8a, 0xf9, 0x06, 0x4a, 0xab, 0x1b, 0x70, + 0xa0, 0x92, 0xa7, 0x51, 0x1b, 0xf9, 0x44, 0x60, 0x47, 0x43, 0x8c, 0x25, 0xfd, 0xfe, 0xd6, 0xc6, + 0xff, 0x9e, 0x4b, 0x05, 0xec, 0xc5, 0x8a, 0xb5, 0xa9, 0x37, 0x70, 0x6d, 0x4c, 0xf4, 0x9b, 0x94, + 0x75, 0xfe, 0x99, 0x11, 0x0b, 0xd6, 0x59, 0x56, 0x33, 0xb3, 0xa2, 0xcf, 0xce, 0x36, 0x6c, 0xfd, + 0xd2, 0x52, 0x2b, 0xf9, 0x40, 0xe0, 0x5e, 0x12, 0x8f, 0x91, 0x2a, 0x7c, 0x81, 0x34, 0x3c, 0x41, + 0xa9, 0x78, 0xd4, 0xca, 0x84, 0x5f, 0x59, 0x5a, 0x15, 0x36, 0x95, 0x50, 0x34, 0x38, 0xa5, 0x41, + 0x20, 0x18, 0x55, 0x5c, 0x44, 0x99, 0xd0, 0x5b, 0x69, 0xbc, 0xae, 0xc3, 0x86, 0x03, 0xd7, 0x99, + 0x08, 0x43, 0x8c, 0x18, 0x86, 0xa8, 0x87, 0x30, 0x13, 0x73, 0xee, 0xc3, 0xee, 0x12, 0x95, 0x13, + 0x37, 0x8f, 0x3f, 0x97, 0xa1, 0x74, 0x2c, 0x5b, 0x06, 0x85, 0x1b, 0xb3, 0x2b, 0xbc, 0xeb, 0xfe, + 0x76, 0x0d, 0xb8, 0xf3, 0xbb, 0x63, 0x3d, 0x5c, 0x01, 0x9a, 0xb4, 0x32, 0x1a, 0xb0, 0xae, 0xe7, + 0x67, 0xe7, 0x7e, 0x98, 0xe6, 0xad, 0x07, 0xcb, 0xf3, 0xba, 0xe6, 0x5b, 0x02, 0x66, 0xee, 0x24, + 0xdc, 0x9c, 0x22, 0x39, 0xbc, 0x75, 0x70, 0x39, 0x5e, 0x8b, 0x18, 0xc0, 0xf6, 0xe2, 0x5b, 0x63, + 0xd9, 0xef, 0x99, 0x87, 0xad, 0xfd, 0x4b, 0xc0, 0xba, 0xb5, 0x84, 0xad, 0x45, 0x7b, 0x5e, 0x5d, + 0x56, 0x6b, 0x06, 0xb5, 0x6a, 0x2b, 0xa3, 0x93, 0xa6, 0x87, 0x47, 0x5f, 0x87, 0x36, 0xb9, 0x18, + 0xda, 0xe4, 0xc7, 0xd0, 0x26, 0xef, 0x46, 0x76, 0xe1, 0x62, 0x64, 0x17, 0xbe, 0x8f, 0xec, 0xc2, + 0xeb, 0x6a, 0x8b, 0xab, 0x76, 0xaf, 0xe9, 0x32, 0x11, 0x7a, 0x4f, 0x5f, 0x9d, 0x3c, 0x79, 0x86, + 0xaa, 0x2f, 0xe2, 0x8e, 0xc7, 0xda, 0x94, 0x47, 0xde, 0xf9, 0xf8, 0xc1, 0x51, 0x83, 0x2e, 0xca, + 0xe6, 0x5a, 0xfa, 0x7c, 0xec, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x98, 0xab, 0x55, 0x32, 0x8a, + 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // ClaimUnlocked ... + ClaimUnlocked(ctx context.Context, in *MsgClaimUnlocked, opts ...grpc.CallOption) (*MsgClaimUnlockedResponse, error) + // Clawback ... + Clawback(ctx context.Context, in *MsgClawback, opts ...grpc.CallOption) (*MsgClawbackResponse, error) + // CreateTeamVestingAccount ... + CreateTeamVestingAccount(ctx context.Context, in *MsgCreateTeamVestingAccount, opts ...grpc.CallOption) (*MsgCreateTeamVestingAccountResponse, error) + // ClaimAuthorityRewards ... + ClaimAuthorityRewards(ctx context.Context, in *MsgClaimAuthorityRewards, opts ...grpc.CallOption) (*MsgClaimAuthorityRewardsResponse, error) + // ClaimInflationRewards ... 
+ ClaimAccountRewards(ctx context.Context, in *MsgClaimAccountRewards, opts ...grpc.CallOption) (*MsgClaimAccountRewardsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) ClaimUnlocked(ctx context.Context, in *MsgClaimUnlocked, opts ...grpc.CallOption) (*MsgClaimUnlockedResponse, error) { + out := new(MsgClaimUnlockedResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Msg/ClaimUnlocked", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) Clawback(ctx context.Context, in *MsgClawback, opts ...grpc.CallOption) (*MsgClawbackResponse, error) { + out := new(MsgClawbackResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Msg/Clawback", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreateTeamVestingAccount(ctx context.Context, in *MsgCreateTeamVestingAccount, opts ...grpc.CallOption) (*MsgCreateTeamVestingAccountResponse, error) { + out := new(MsgCreateTeamVestingAccountResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Msg/CreateTeamVestingAccount", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ClaimAuthorityRewards(ctx context.Context, in *MsgClaimAuthorityRewards, opts ...grpc.CallOption) (*MsgClaimAuthorityRewardsResponse, error) { + out := new(MsgClaimAuthorityRewardsResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Msg/ClaimAuthorityRewards", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ClaimAccountRewards(ctx context.Context, in *MsgClaimAccountRewards, opts ...grpc.CallOption) (*MsgClaimAccountRewardsResponse, error) { + out := new(MsgClaimAccountRewardsResponse) + err := c.cc.Invoke(ctx, "/kyve.team.v1beta1.Msg/ClaimAccountRewards", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // ClaimUnlocked ... + ClaimUnlocked(context.Context, *MsgClaimUnlocked) (*MsgClaimUnlockedResponse, error) + // Clawback ... + Clawback(context.Context, *MsgClawback) (*MsgClawbackResponse, error) + // CreateTeamVestingAccount ... + CreateTeamVestingAccount(context.Context, *MsgCreateTeamVestingAccount) (*MsgCreateTeamVestingAccountResponse, error) + // ClaimAuthorityRewards ... + ClaimAuthorityRewards(context.Context, *MsgClaimAuthorityRewards) (*MsgClaimAuthorityRewardsResponse, error) + // ClaimAccountRewards ... + ClaimAccountRewards(context.Context, *MsgClaimAccountRewards) (*MsgClaimAccountRewardsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations.
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) ClaimUnlocked(ctx context.Context, req *MsgClaimUnlocked) (*MsgClaimUnlockedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClaimUnlocked not implemented") +} +func (*UnimplementedMsgServer) Clawback(ctx context.Context, req *MsgClawback) (*MsgClawbackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Clawback not implemented") +} +func (*UnimplementedMsgServer) CreateTeamVestingAccount(ctx context.Context, req *MsgCreateTeamVestingAccount) (*MsgCreateTeamVestingAccountResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateTeamVestingAccount not implemented") +} +func (*UnimplementedMsgServer) ClaimAuthorityRewards(ctx context.Context, req *MsgClaimAuthorityRewards) (*MsgClaimAuthorityRewardsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClaimAuthorityRewards not implemented") +} +func (*UnimplementedMsgServer) ClaimAccountRewards(ctx context.Context, req *MsgClaimAccountRewards) (*MsgClaimAccountRewardsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ClaimAccountRewards not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_ClaimUnlocked_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgClaimUnlocked) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ClaimUnlocked(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Msg/ClaimUnlocked", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ClaimUnlocked(ctx, req.(*MsgClaimUnlocked)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_Clawback_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgClawback) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Clawback(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Msg/Clawback", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Clawback(ctx, req.(*MsgClawback)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CreateTeamVestingAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateTeamVestingAccount) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateTeamVestingAccount(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Msg/CreateTeamVestingAccount", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateTeamVestingAccount(ctx, req.(*MsgCreateTeamVestingAccount)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ClaimAuthorityRewards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgClaimAuthorityRewards) + if err := dec(in); err != nil { + return nil, err + } + if interceptor 
== nil { + return srv.(MsgServer).ClaimAuthorityRewards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Msg/ClaimAuthorityRewards", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ClaimAuthorityRewards(ctx, req.(*MsgClaimAuthorityRewards)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ClaimAccountRewards_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgClaimAccountRewards) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ClaimAccountRewards(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kyve.team.v1beta1.Msg/ClaimAccountRewards", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ClaimAccountRewards(ctx, req.(*MsgClaimAccountRewards)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "kyve.team.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ClaimUnlocked", + Handler: _Msg_ClaimUnlocked_Handler, + }, + { + MethodName: "Clawback", + Handler: _Msg_Clawback_Handler, + }, + { + MethodName: "CreateTeamVestingAccount", + Handler: _Msg_CreateTeamVestingAccount_Handler, + }, + { + MethodName: "ClaimAuthorityRewards", + Handler: _Msg_ClaimAuthorityRewards_Handler, + }, + { + MethodName: "ClaimAccountRewards", + Handler: _Msg_ClaimAccountRewards_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "kyve/team/v1beta1/tx.proto", +} + +func (m *MsgClaimUnlocked) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimUnlocked) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimUnlocked) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintTx(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x22 + } + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgClaimUnlockedResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimUnlockedResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimUnlockedResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgClaimAuthorityRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *MsgClaimAuthorityRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimAuthorityRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintTx(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x1a + } + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgClaimAuthorityRewardsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimAuthorityRewardsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimAuthorityRewardsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgClaimAccountRewards) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimAccountRewards) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimAccountRewards) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Recipient) > 0 { + i -= len(m.Recipient) + copy(dAtA[i:], m.Recipient) + i = encodeVarintTx(dAtA, i, uint64(len(m.Recipient))) + i-- + dAtA[i] = 0x22 + } + if m.Amount != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Amount)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgClaimAccountRewardsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClaimAccountRewardsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClaimAccountRewardsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgClawback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClawback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClawback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Clawback != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Clawback)) + i-- + dAtA[i] = 0x18 + } + if m.Id != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Id)) + i-- + dAtA[i] = 0x10 
+ } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgClawbackResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgClawbackResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgClawbackResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCreateTeamVestingAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateTeamVestingAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateTeamVestingAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Commencement != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Commencement)) + i-- + dAtA[i] = 0x18 + } + if m.TotalAllocation != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.TotalAllocation)) + i-- + dAtA[i] = 0x10 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateTeamVestingAccountResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateTeamVestingAccountResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateTeamVestingAccountResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgClaimUnlocked) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgClaimUnlockedResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgClaimAuthorityRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgClaimAuthorityRewardsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgClaimAccountRewards) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l 
+ sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + if m.Amount != 0 { + n += 1 + sovTx(uint64(m.Amount)) + } + l = len(m.Recipient) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgClaimAccountRewardsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgClawback) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.Id != 0 { + n += 1 + sovTx(uint64(m.Id)) + } + if m.Clawback != 0 { + n += 1 + sovTx(uint64(m.Clawback)) + } + return n +} + +func (m *MsgClawbackResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCreateTeamVestingAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + if m.TotalAllocation != 0 { + n += 1 + sovTx(uint64(m.TotalAllocation)) + } + if m.Commencement != 0 { + n += 1 + sovTx(uint64(m.Commencement)) + } + return n +} + +func (m *MsgCreateTeamVestingAccountResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgClaimUnlocked) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimUnlocked: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimUnlocked: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimUnlockedResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimUnlockedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimUnlockedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimAuthorityRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimAuthorityRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimAuthorityRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimAuthorityRewardsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimAuthorityRewardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimAuthorityRewardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimAccountRewards) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimAccountRewards: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimAccountRewards: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + m.Amount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Amount |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Recipient", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Recipient = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClaimAccountRewardsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClaimAccountRewardsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClaimAccountRewardsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClawback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClawback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClawback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Clawback", wireType) + } + m.Clawback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Clawback |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgClawbackResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgClawbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgClawbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateTeamVestingAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateTeamVestingAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateTeamVestingAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TotalAllocation", wireType) + } + m.TotalAllocation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TotalAllocation |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commencement", wireType) + } + m.Commencement = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commencement |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateTeamVestingAccountResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateTeamVestingAccountResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateTeamVestingAccountResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { 
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTx
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupTx
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthTx
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthTx        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTx          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/x/team/types/types.go b/x/team/types/types.go
new file mode 100644
index 00000000..af2a3796
--- /dev/null
+++ b/x/team/types/types.go
@@ -0,0 +1,74 @@
+package types
+
+import (
+	"errors"
+	"strconv"
+	"time"
+
+	"github.com/cosmos/cosmos-sdk/types/bech32"
+)
+
+// VestingPlan contains basic information for one member
+type VestingPlan struct {
+	MaximumVestingAmount uint64
+
+	ClawbackAmount uint64
+
+	TokenVestingStart uint64
+
+	TokenVestingFinished uint64
+
+	TokenUnlockStart uint64
+
+	TokenUnlockFinished uint64
+}
+
+// VestingStatus contains computed vesting values for a member at a given time
+type VestingStatus struct {
+	// vested_amount ...
+	TotalVestedAmount uint64
+	// unlocked_amount ...
+	TotalUnlockedAmount uint64
+
+	// i.e. U(t) - C
+	CurrentClaimableAmount uint64
+
+	// unvested_amount ...
+	LockedVestedAmount uint64
+
+	// unvested_amount ...
+	RemainingUnvestedAmount uint64
+}
+
+var (
+	TEAM_AUTHORITY_STRING  = "kyve1fd4qu868n7arav8vteghcppxxa0p2vna5f5ep8"
+	TEAM_ALLOCATION_STRING = "165000000000000000"
+	TGE_STRING             = "2023-02-01T10:34:15"
+)
+
+// Convert passed build variables (string) to the corresponding int values
+func init() {
+	// Authority needs to be a valid Bech32 address
+	prefix, _, err := bech32.DecodeAndConvert(TEAM_AUTHORITY_STRING)
+	if err != nil {
+		panic(err)
+	}
+	if prefix != "kyve" {
+		panic(errors.New("team authority address is not a KYVE address"))
+	}
+	AUTHORITY_ADDRESS = TEAM_AUTHORITY_STRING
+
+	// TEAM_ALLOCATION must be a valid integer
+	parsedAllocation, err := strconv.ParseUint(TEAM_ALLOCATION_STRING, 10, 64)
+	if err != nil {
+		panic(err)
+	}
+	TEAM_ALLOCATION = parsedAllocation
+
+	// TGE must be a valid unix date-string
+	tge, err := time.Parse("2006-01-02T15:04:05", TGE_STRING)
+	if err != nil {
+		panic(err)
+	}
+	TGE = uint64(tge.Unix())
+}
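
Note on the init() in x/team/types/types.go: it converts the string build variables into the typed values used by the module (AUTHORITY_ADDRESS, TEAM_ALLOCATION, TGE, presumably declared elsewhere in x/team), panicking at startup if any of them is malformed. The string defaults shown here are presumably meant to be overridden at build time (e.g. via linker flags), though this patch does not show that wiring. The snippet below is a minimal, stdlib-only sketch that mirrors the two non-Bech32 checks with the same layout and parsing rules, so the expected numeric values can be verified outside the chain binary; it is an illustration, not part of the patch.

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	// TEAM_ALLOCATION must parse as an unsigned 64-bit integer,
	// exactly as strconv.ParseUint is used in init().
	allocation, err := strconv.ParseUint("165000000000000000", 10, 64)
	if err != nil {
		panic(err)
	}

	// TGE is parsed with the same layout string as in init(); since the
	// layout carries no zone designator, time.Parse interprets it as UTC.
	tge, err := time.Parse("2006-01-02T15:04:05", "2023-02-01T10:34:15")
	if err != nil {
		panic(err)
	}

	fmt.Println(allocation) // 165000000000000000
	fmt.Println(tge.Unix()) // expected: 1675247655 (2023-02-01T10:34:15 UTC)
}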