commit 20afb5db809f6184b1c3e9214c2f7d547e23cdb2e084c725cbb54e940d3b9d9d Author: Mukan Erkin Törük Date: Mon May 11 03:18:24 2026 +0300 initial: sovereign Mukan Network fork diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..0688687 --- /dev/null +++ b/.clang-format @@ -0,0 +1,116 @@ +--- +Language: Proto +BasedOnStyle: Google +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignConsecutiveAssignments: true +AlignConsecutiveDeclarations: true +AlignEscapedNewlines: Right +AlignOperands: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortBlocksOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: false +AlwaysBreakTemplateDeclarations: false +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: "^ IWYU pragma:" +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ExperimentalAutoDetectBinPacking: 
false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + - Regex: '^(<|"(gtest|gmock|isl|json)/)' + Priority: 3 + - Regex: ".*" + Priority: 1 +IncludeIsMainRegex: "(Test)?$" +IndentCaseLabels: false +IndentPPDirectives: None +IndentWidth: 2 +IndentWrappedFunctionNames: false +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: true +MacroBlockBegin: "" +MacroBlockEnd: "" +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Right +RawStringFormats: + - Delimiters: + - pb + Language: TextProto + BasedOnStyle: Google +ReflowComments: true +SortIncludes: true +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: false +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never +--- + diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..80adb60 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +Dockerfile +Vagrantfile + +build/ +coverage.txt diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..2c80729 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +client/docs/swagger-ui/* linguist-vendored diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..2785d2e --- /dev/null +++ 
b/.github/CODEOWNERS @@ -0,0 +1,15 @@ +# CODEOWNERS: https://help.github.com/articles/about-codeowners/ + +# NOTE: Order is important; the last matching pattern takes the most precedence + +# Cosmos SDK Codeowners + +# Core team as default owners + +* @cosmos/sdk-core-dev + +# Components + +# docs configuration + +/docs/ @cosmos/sdk-core-dev diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000..7857ca7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,30 @@ +--- +name: Bug Report +about: Create a report to help us squash bugs! +title: "[Bug]: " +labels: "T:Bug" +--- + + + + + +## Summary of Bug + + + +## Version + + + +## Steps to Reproduce + + diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000..b611b43 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,49 @@ +name: 🐛 Bug report +description: Create a report to help us squash bugs! +title: "[Bug]: " +labels: ["T:Bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + Before smashing the submit button please review the template. + + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search existing issues to avoid creating duplicates. + options: + - label: I have searched the existing issues + required: true + + - type: markdown + attributes: + value: | + IMPORTANT: Prior to opening a bug report, check if it affects one of the core modules + and if its eligible for a bug bounty on `SECURITY.md`. Bugs that are not submitted + through the appropriate channels won't receive any bounty. + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + placeholder: Tell us what you see! + value: "A bug happened!" 
+ validations: + required: true + - type: input + attributes: + label: Cosmos SDK Version + description: If applicable, specify the version you're using + placeholder: 0.46, 0.47, main, etc. + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: How to reproduce? + description: If applicable could you describe how we could reproduce the bug + placeholder: Tell us what how to reproduce the bug! + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml new file mode 100644 index 0000000..e9ee52c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -0,0 +1,18 @@ +name: Documentation Request +description: Create an issue for missing or incorrect documentation +title: "[Documentation]: " +labels: ["T:Docs"] +body: + - type: markdown + attributes: + value: | + ✰ Thanks for opening an issue! ✰ + Tell us where what you would like to see get added to the documentation or if there is an error in the documentation? 
+ + - type: textarea + id: what-happened + attributes: + label: Summary + placeholder: Description of what you would like to see + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/epics.md b/.github/ISSUE_TEMPLATE/epics.md new file mode 100644 index 0000000..d164546 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/epics.md @@ -0,0 +1,31 @@ +--- +name: Epic +about: Create an epic/user +title: "[Epic]: " +labels: T:Epic +--- + + + +## Summary + + + +## Problem Definition + + + +## Work Breakdown + + diff --git a/.github/ISSUE_TEMPLATE/epics.yml b/.github/ISSUE_TEMPLATE/epics.yml new file mode 100644 index 0000000..1e00579 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/epics.yml @@ -0,0 +1,43 @@ + +name: Epic +description: Create an epic/user +title: "[Epic]: " +labels: ["T:Epic"] +body: + - type: markdown + attributes: + value: | + Thanks for opening this issue, this template is meant for long lived work scopes, if this is what you're looking for please continue + + - type: textarea + id: summary + attributes: + label: Summary + description: | + What are the user needs? + How could this solution fix the user facing problem? + placeholder: Short, concise description of the proposed feature/changes to the repository + validations: + required: true + - type: textarea + id: problem + attributes: + label: Problem Definition + description: | + Why do we need this feature? + What problems may be addressed by introducing this feature? + What benefits does the SDK stand to gain by including this feature? + Are there any disadvantages of including this feature? + placeholder: Description of the issue being faced + validations: + required: true + - type: textarea + id: work + attributes: + label: Work Breakdown + description: | + Break the work into many bullet points that will later be turned into issues that can be assigned to developers to work on + This work may been to be broken up into phases of work in order to better organize when and how things get done. 
+ placeholder: Description of the steps needed to deliver this feature + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000..146033e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,28 @@ +--- +name: Feature Request +about: Create a proposal to request a feature +title: "[Feature]: " +labels: T:feature-request +--- + + + +## Summary + + + +## Problem Definition + + + +## Proposal + + diff --git a/.github/ISSUE_TEMPLATE/feature-request.yml b/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000..6b17489 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,41 @@ +name: Feature Request +description: Create a proposal to request a feature +title: "[Feature]: " +labels: ["T:feature-request"] +body: + - type: markdown + attributes: + value: | + ✰ Thanks for opening an issue! ✰ + - type: textarea + id: summary + attributes: + label: Summary + description: | + What are the user needs? + How could this solution fix the user facing problem? + placeholder: Short, concise description of the proposed feature/changes to the repository + validations: + required: true + - type: textarea + id: problem + attributes: + label: Problem Definition + description: | + If applicable please answer the below questions + Why do we need this feature? + What problems may be addressed by introducing this feature? + What benefits does the SDK stand to gain by including this feature? + Are there any disadvantages of including this feature? 
+ placeholder: Description of the issue being faced + validations: + required: false + - type: textarea + id: proposal + attributes: + label: Proposed Feature + description: | + Description of the proposed features or changes to an existing feature to meet your needs + placeholder: Description of the proposed feature(s) + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/module-readiness-checklist.md b/.github/ISSUE_TEMPLATE/module-readiness-checklist.md new file mode 100644 index 0000000..a3d51d1 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/module-readiness-checklist.md @@ -0,0 +1,40 @@ +--- +name: Module Readiness Checklist +about: Pre-flight checklist that modules must pass in order to be included in a release of the Cosmos SDK +labels: 'module-readiness-checklist' +--- + +## x/{MODULE_NAME} Module Readiness Checklist + +This checklist is to be used for tracking the final internal audit of new Cosmos SDK modules prior to inclusion in a published release. + +### Release Candidate Checklist + +The following checklist should be gone through once the module has been fully implemented. This audit should be performed directly on `main`, or preferably on a `alpha` or `beta` release tag that includes the module. + +The module **should not** be included in any Release Candidate tag until it has passed this checklist. + +- [ ] API audit (at least 1 person) (@assignee) + - [ ] Are Msg and Query methods and types well-named and organized? 
+ - [ ] Is everything well documented (inline godoc as well as the spec [README.md](https://github.com/cosmos/cosmos-sdk/blob/main/docs/spec/README.md) in module directory) +- [ ] State machine audit (at least 2 people) (@assignee1, @assignee2) + - [ ] Read through MsgServer code and verify correctness upon visual inspection + - [ ] Ensure all state machine code which could be confusing is properly commented + - [ ] Make sure state machine logic matches Msg method documentation + - [ ] Ensure that all state machine edge cases are covered with tests and that test coverage is sufficient (at least 90% coverage on module code) + - [ ] Assess potential threats for each method including spam attacks and ensure that threats have been addressed sufficiently. This should be done by writing up threat assessment for each method + - [ ] Assess potential risks of any new third party dependencies and decide whether a dependency audit is needed +- [ ] Completeness audit, fully implemented with tests (at least 1 person) (@assignee) + - [ ] Genesis import and export of all state + - [ ] Query services + - [ ] CLI methods + - [ ] All necessary migration scripts are present (if this is an upgrade of existing module) + +### Published Release Checklist + +After the above checks have been audited and the module is included in a tagged Release Candidate, the following additional checklist should be undertaken for live testing, and potentially a 3rd party audit (if deemed necessary): + +- [ ] Testnet / devnet testing (2-3 people) (@assignee1, @assignee2, @assignee3) + - [ ] All Msg methods have been tested especially in light of any potential threats identified + - [ ] Genesis import and export has been tested +- [ ] Nice to have (and needed in some cases if threats could be high): Official 3rd party audit diff --git a/.github/ISSUE_TEMPLATE/qa.md b/.github/ISSUE_TEMPLATE/qa.md new file mode 100644 index 0000000..c1dafb5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/qa.md @@ -0,0 +1,87 @@ + 
+ +## Summary + + + +## Major Changes + + + +## Gotchas + + + +## QA Breakdown + +* Audit + * [ ] Audit BaseApp + * [ ] Audit Types + * [ ] Audit x/auth + * [ ] Audit x/authz + * [ ] Audit x/bank + * [ ] Audit x/circuit + * [ ] Audit x/consensus + * [ ] Audit x/distribution + * [ ] Audit x/evidence + * [ ] Audit x/epochs + * [ ] Audit x/feegrant + * [ ] Audit x/genutil + * [ ] Audit x/gov + * [ ] Audit x/group + * [ ] Audit x/mint + * [ ] Audit x/nft + * [ ] Audit x/protocolpool + * [ ] Audit x/slashing + * [ ] Audit x/staking + * [ ] Audit x/tx + * [ ] Audit x/upgrade + * [ ] Audit client + * [ ] Audit server + * [ ] Audit store + * [ ] Audit runtime + * [ ] Audit simapp +* [ ] Release alpha +* [ ] Cosmos-SDK testnet +* [ ] Public testnet (IBC, WASM, SDK) +* [ ] Upgrade a chain with data from vX +* Release documentation + * [ ] Audit UPGRADING.md + * [ ] Update all codeblock to the appropriate version number + + +### Audit checklist + +* please copy to a markdown to follow while you walk through the code +* 2 people should be assigned to each section + +* [ ] API audit + * spec audit: check if the spec is complete. + * Are Msg and Query methods and types well-named and organized? 
+ * Is everything well documented (inline godoc as well as package [`README.md`](https://docs.cosmos.network/main/spec/SPEC_MODULE#common-layout) in module directory) + * check the proto definition - make sure everything is in accordance to ADR-30 (at least 1 person, TODO assignee) + * Check new fields and endpoints have the `Since: cosmos-sdk X` comment +* [ ] Completeness audit, fully implemented with tests + * [ ] Genesis import and export of all state + * [ ] Query services + * [ ] CLI methods + * [ ] All necessary migration scripts are present (if this is an upgrade of existing module) +* [ ] State machine audit + * [ ] Read through MsgServer code and verify correctness upon visual inspection + * [ ] Ensure all state machine code which could be confusing is properly commented + * [ ] Make sure state machine logic matches Msg method documentation + * [ ] Ensure that all state machine edge cases are covered with tests and that test coverage is sufficient (at least 90% coverage on module code) + * [ ] Assess potential threats for each method including spam attacks and ensure that threats have been addressed sufficiently. This should be done by writing up threat assessment for each method. Specifically we should be paying attention to: + * [ ] algorithmic complexity and places this could be exploited (ex. nested `for` loops) + * [ ] charging gas complex computation (ex. `for` loops) + * [ ] storage is safe (we don't pollute the state). + * [ ] Assess potential risks of any new third party dependencies and decide whether a dependency audit is needed + * [ ] Check correctness of simulation implementation if any +* [ ] Audit Changelog against commit log, ensuring all breaking changes, bug fixes, and improvements are properly documented. 
+ +If any changes are needed, please make them against main and backport them to release/vX.X.x diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..8b5b5a6 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,38 @@ +# Description + +Closes: #XXXX + + + +--- + +## Author Checklist + +*All items are required. Please add a note to the item if the item is not applicable and +please add links to any relevant follow up issues. Your PR will not be merged unless you satisfy +all of these items.* + +I have... + +* [ ] included the correct [type prefix](https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json) in the PR title, you can find examples of the prefixes below: + +* [ ] confirmed `!` in the type prefix if API or client breaking change +* [ ] targeted the correct branch (see [PR Targeting](https://github.com/cosmos/cosmos-sdk/blob/main/CONTRIBUTING.md#pr-targeting)) +* [ ] provided a link to the relevant issue or specification +* [ ] reviewed "Files changed" and left comments if necessary +* [ ] included the necessary unit and integration [tests](https://github.com/cosmos/cosmos-sdk/blob/main/CONTRIBUTING.md#testing) +* [ ] added a changelog entry to `CHANGELOG.md` +* [ ] updated the relevant documentation or specification, including comments for [documenting Go code](https://blog.golang.org/godoc) +* [ ] confirmed all CI checks have passed + diff --git a/.github/codeql/config.yml b/.github/codeql/config.yml new file mode 100644 index 0000000..3ca864f --- /dev/null +++ b/.github/codeql/config.yml @@ -0,0 +1,11 @@ +packs: + - crypto-com/cosmos-sdk-codeql +queries: + - uses: security-and-quality + - uses: security-experimental + - uses: security-extended +paths-ignore: + - api + - '**/*_test.go' + - '**/*.pulsar.go' + - '**/*.pb.go' diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..cf4b583 --- /dev/null +++ 
b/.github/dependabot.yml @@ -0,0 +1,259 @@ +# Please see the documentation for all configuration options: +# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: daily + time: "01:00" + + - package-ecosystem: gomod + directory: "/" + schedule: + interval: daily + time: "01:05" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/simapp" + schedule: + interval: daily + time: "01:10" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/tests" + schedule: + interval: weekly + day: monday + time: "01:15" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/api" + schedule: + interval: weekly + day: tuesday + time: "01:20" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/core" + schedule: + interval: weekly + day: thursday + time: "01:30" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/depinject" + schedule: + interval: weekly + day: friday + time: "01:35" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/errors" + schedule: + interval: weekly + day: monday + time: "01:40" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/math" + schedule: + interval: weekly + day: tuesday + time: "01:45" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/client/v2" + schedule: + interval: weekly + day: wednesday + time: "01:50" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/schema" + schedule: + interval: weekly + day: wednesday + time: "01:53" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/x/tx" + schedule: + interval: weekly + day: thursday + time: 
"01:55" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/tools/cosmovisor" + schedule: + interval: weekly + day: friday + time: "02:00" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/tools/confix" + schedule: + interval: weekly + day: tuesday + time: "02:10" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/collections" + schedule: + interval: weekly + day: friday + time: "02:20" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/x/nft" + schedule: + interval: weekly + day: monday + time: "02:25" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/x/circuit" + schedule: + interval: weekly + day: tuesday + time: "02:30" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "x/feegrant" + schedule: + interval: weekly + day: wednesday + time: "02:35" + labels: + - "A:automerge" + - dependencies + + - package-ecosystem: gomod + directory: "/x/evidence" + schedule: + interval: weekly + day: thursday + time: "02:40" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/store" + schedule: + interval: weekly + day: friday + time: "02:45" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "x/upgrade" + schedule: + interval: weekly + day: monday + time: "02:50" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "log" + schedule: + interval: weekly + day: tuesday + time: "02:55" + labels: + - "A:automerge" + - dependencies + - package-ecosystem: gomod + directory: "/tools/benchmark" + schedule: + interval: weekly + day: wednesday + time: "03:25" + labels: + - "A:automerge" + - dependencies + + # Dependencies should be up to date on release branch + - package-ecosystem: gomod + directory: "/" + target-branch: "release/v0.47.x" + schedule: + 
interval: daily + time: "03:00" + labels: + - "A:automerge" + - dependencies + - "testing-required" + allow: + - dependency-name: "github.com/cosmos/cosmos-sdk/*" + dependency-type: "all" + - dependency-name: "github.com/cosmos/*" + dependency-type: "all" + - dependency-name: "cosmossdk.io/*" + dependency-type: "all" + - dependency-name: "github.com/cometbft/*" + dependency-type: "all" + ignore: + - dependency-name: "github.com/cometbft/cometbft" + # cometbft 0.37 is not semver, but we want to only update "patch" versions for 0.37.x + update-types: + ["version-update:semver-major", "version-update:semver-minor"] + + - package-ecosystem: gomod + directory: "/" + target-branch: "release/v0.50.x" + schedule: + interval: daily + time: "03:00" + labels: + - "A:automerge" + - dependencies + - "testing-required" + allow: + - dependency-name: "github.com/cosmos/cosmos-sdk/*" + dependency-type: "all" + - dependency-name: "github.com/cosmos/*" + dependency-type: "all" + - dependency-name: "cosmossdk.io/*" + dependency-type: "all" + - dependency-name: "github.com/cometbft/*" + dependency-type: "all" + ignore: + - dependency-name: "github.com/cometbft/cometbft" + # cometbft 0.38 is not semver, but we want to only update "patch" versions for 0.38.x + update-types: + ["version-update:semver-major", "version-update:semver-minor"] diff --git a/.github/pr_labeler.yml b/.github/pr_labeler.yml new file mode 100644 index 0000000..7dc183b --- /dev/null +++ b/.github/pr_labeler.yml @@ -0,0 +1,71 @@ +"C:CLI": + - client/**/* + - x/*/client/**/* +"C:Confix": + - tools/confix/**/* +"C:Cosmovisor": + - tools/cosmovisor/**/* +"C:Keys": + - client/keys/**/* +"C:Simulations": + - types/simulation/**/* + - x/simulation/**/* + - x/*/simulation/**/* + - simsx/**/* + - tools/benchmark/**/* +"C:Store": + - store/**/* +"C:collections": + - collections/**/* +"C:log": + - log/* +"C:x/auth": + - x/auth/**/* +"C:x/authz": + - x/authz/**/* +"C:x/bank": + - x/bank/**/* +"C:x/circuit": + - x/circuit/**/* 
+"C:x/consensus": + - x/consensus/**/* +"C:x/distribution": + - x/distribution/**/* +"C:x/evidence": + - x/evidence/**/* +"C:x/feegrant": + - x/feegrant/**/* +"C:x/genutil": + - x/genutil/**/* +"C:x/gov": + - x/gov/**/* +"C:x/group": + - x/group/**/* +"C:x/mint": + - x/mint/**/* +"C:x/nft": + - x/nft/**/* +"C:x/protocolpool": + - x/protocolpool/**/* +"C:x/slashing": + - x/slashing/**/* +"C:x/staking": + - x/staking/**/* +"C:x/tx": + - x/tx/**/* +"C:x/upgrade": + - x/upgrade/**/* +"C:x/epochs": + - x/epochs/**/* +"Type: ADR": + - docs/architecture/**/* +"Type: Build": + - Makefile + - Dockerfile + - docker-compose.yml + - scripts/* +"Type: CI": + - .github/** + - buf.yaml + - .mergify.yml + - .golangci.yml diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml new file mode 100644 index 0000000..7bfe086 --- /dev/null +++ b/.github/workflows/build-docs.yml @@ -0,0 +1,38 @@ +name: Build Docs +# This workflow runs when a PR is labeled with `docs` +# This will check if the docs build successfully by running `make build-docs` +on: + pull_request: + branches: + - main + - "release/**" + paths: + - "docs/**" + - "x/**/*.md" + - .github/workflows/deploy-docs.yml + - .github/workflows/build-docs.yml + +permissions: + contents: read + +jobs: + check-docs-build: + name: Check docs build + runs-on: ubuntu-latest + steps: + - name: Checkout 🛎️ + uses: actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Setup Node.js 🔧 + uses: actions/setup-node@v4 + with: + node-version: "16.x" + + # npm install npm should be removed when https://github.com/npm/cli/issues/4942 is fixed + - name: Build docs 🔧 + run: | + npm install -g npm@8.5.5 + make build-docs diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000..38785eb --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,54 @@ +name: Build SimApp +on: + pull_request: + merge_group: + push: + branches: + - main + - release/** 
+permissions: + contents: read + +concurrency: + group: ci-${{ github.ref }}-build + cancel-in-progress: true + +jobs: + build: + runs-on: depot-ubuntu-22.04-4 + strategy: + matrix: + go-arch: ["amd64", "arm64"] # drop 32 bit support for now (and maybe forever) + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: install aarch64-gcc + if: matrix.go-arch == 'arm64' + run: sudo apt-get install gcc-aarch64-linux-gnu binutils-aarch64-linux-gnu + ################### + #### Build App #### + ################### + - name: Build + run: GOARCH=${{ matrix.go-arch }} make build + - name: Build with legacy app.go + run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="legacy" make build + # TODO re-add with comet v1 + #- name: Build with BLS12381 + # if: matrix.go-arch == 'amd64' + # run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="bls12381" make build + - name: Build with Secp_cgo + if: matrix.go-arch == 'amd64' + run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="secp" make build + - name: Build with legacy app.go and Secp_cgo + if: matrix.go-arch == 'amd64' + run: GOARCH=${{ matrix.go-arch }} COSMOS_BUILD_OPTIONS="legacy,secp" make build + ################### + ## Build Tooling ## + ################### + - name: Build Cosmovisor + run: GOARCH=${{ matrix.go-arch }} make cosmovisor + - name: Build Confix + run: GOARCH=${{ matrix.go-arch }} make confix diff --git a/.github/workflows/changelog-reminder.yml b/.github/workflows/changelog-reminder.yml new file mode 100644 index 0000000..1d25a0c --- /dev/null +++ b/.github/workflows/changelog-reminder.yml @@ -0,0 +1,19 @@ +# Checks if a changelog is missing in the PR diff +name: Changelog Reminder +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + paths: ["**/*.go"] +permissions: + pull-requests: write +jobs: + remind: + name: Changelog Reminder + runs-on: depot-ubuntu-22.04-4 + # Skip draft PRs and PRs starting 
with: revert, test, chore, ci, docs, style, build, refactor + if: "!github.event.pull_request.draft && !contains(github.event.pull_request.title, 'revert') && !contains(github.event.pull_request.title, 'test') && !contains(github.event.pull_request.title, 'chore') && !contains(github.event.pull_request.title, 'ci') && !contains(github.event.pull_request.title, 'docs') && !contains(github.event.pull_request.title, 'style') && !contains(github.event.pull_request.title, 'build') && !contains(github.event.pull_request.title, 'refactor')" + steps: + - uses: actions/checkout@v4 + - uses: mskelton/changelog-reminder-action@v3 + with: + message: "@${{ github.actor }} your pull request is missing a changelog!" diff --git a/.github/workflows/clean-action-artifacts.yml b/.github/workflows/clean-action-artifacts.yml new file mode 100644 index 0000000..2143ee5 --- /dev/null +++ b/.github/workflows/clean-action-artifacts.yml @@ -0,0 +1,17 @@ +name: Remove GitHub Action Old Artifacts + +on: + schedule: + # Every day at 1am + - cron: "0 1 * * *" + +jobs: + remove-old-artifacts: + runs-on: depot-ubuntu-22.04-4 + timeout-minutes: 30 + + steps: + - name: Remove old artifacts + uses: c-hive/gha-remove-artifacts@v1 + with: + age: "7 days" diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000..e91a287 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,58 @@ +name: "CodeQL" + +on: + pull_request: + paths: + - "**.go" + push: + branches: + - main + - release/** + paths: + - "**.go" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + analyze: + name: Analyze + runs-on: depot-ubuntu-22.04-4 + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + # 
Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: "go" + config-file: ./.github/codeql/config.yml + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/consensuswarn.yml b/.github/workflows/consensuswarn.yml new file mode 100644 index 0000000..2697413 --- /dev/null +++ b/.github/workflows/consensuswarn.yml @@ -0,0 +1,19 @@ +name: "Warn about consensus code changes" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + main: + permissions: + pull-requests: write # For reading the PR and posting comment + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: orijtech/consensuswarn@main + with: + roots: "github.com/cosmos/cosmos-sdk/baseapp.BaseApp.PrepareProposal,github.com/cosmos/cosmos-sdk/baseapp.BaseApp.ProcessProposal,github.com/cosmos/cosmos-sdk/baseapp.BaseApp.FinalizeBlock,github.com/cosmos/cosmos-sdk/baseapp.BaseApp.Commit,github.com/cosmos/cosmos-sdk/baseapp.BaseApp.VerifyVoteExtension" diff --git a/.github/workflows/dependabot-update-all.yml b/.github/workflows/dependabot-update-all.yml new file mode 100644 index 0000000..088cd98 --- /dev/null +++ b/.github/workflows/dependabot-update-all.yml @@ -0,0 +1,47 @@ +name: Dependabot Update All Go Modules +on: pull_request + +permissions: + contents: write + pull-requests: write + 
+env: + PR_TITLE: ${{ github.event.pull_request.title }} + +jobs: + update-all: + runs-on: depot-ubuntu-22.04-4 + if: ${{ github.actor == 'dependabot[bot]' }} + steps: + - name: Generate Token + uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v1 + id: app-token + with: + app-id: "${{ secrets.APP_ID }}" + private-key: "${{ secrets.APP_PRIVATE_KEY }}" + - uses: actions/checkout@v4 + with: + repository: ${{ github.event.pull_request.head.repo.full_name }} + ref: ${{ github.event.pull_request.head.ref }} + token: "${{ steps.app-token.outputs.token }}" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Extract updated dependency + id: deps + run: | + # Extract the dependency name from the PR title + # Example: "build(deps): Bump github.com/cosmos/cosmos-sdk from 0.46.0 to 0.47.0" + # Extracts "github.com/cosmos/cosmos-sdk" and "0.47.0" + echo "name=$(echo "$PR_TITLE" | cut -d ' ' -f 3)" >> $GITHUB_OUTPUT + echo "version=$(echo "$PR_TITLE" | cut -d ' ' -f 7)" >> $GITHUB_OUTPUT + - name: Update all Go modules + run: | + ./scripts/go-update-dep-all.sh ${{ format('{0}@v{1}', steps.deps.outputs.name, steps.deps.outputs.version) }} + ./scripts/go-mod-tidy-all.sh + - name: Commit changes + uses: EndBug/add-and-commit@v9 + with: + default_author: user_info + message: "${{ github.event.pull_request.title }} for all modules" diff --git a/.github/workflows/dependencies-review.yml b/.github/workflows/dependencies-review.yml new file mode 100644 index 0000000..94db9fc --- /dev/null +++ b/.github/workflows/dependencies-review.yml @@ -0,0 +1,46 @@ +name: "Dependency Review" +on: + pull_request: + merge_group: + +permissions: + contents: read + +jobs: + dependency-review: + runs-on: depot-ubuntu-22.04-4 + steps: + - name: "Checkout Repository" + uses: actions/checkout@v4 + - name: "Setup Go" + uses: actions/setup-go@v5 + with: + go-version: "1.24" + check-latest: true + - name: "Dependency Review" + uses: 
actions/dependency-review-action@v4 + with: + base-ref: ${{ github.event.pull_request.base.sha || 'main' }} + head-ref: ${{ github.event.pull_request.head.sha || github.ref }} + fail-on-severity: high + - name: "Dependency audit" + run: ./scripts/dep-assert.sh + - name: "Go vulnerability check" + id: govuln + run: | + # Run the vulnerability check and capture its output (ignoring non-zero exit codes) + make vulncheck 2>&1 | tee govulncheck-output.txt || true + + # Extract vulnerability identifiers from the output (e.g., GO-2025-3443) + vulnerabilities=$(grep -o 'GO-[0-9]\{4\}-[0-9]\+' govulncheck-output.txt | sort | uniq) + echo "Detected vulnerabilities: $vulnerabilities" + + # Check if any vulnerability other than GO-2025-3443 exists + for vuln in $vulnerabilities; do + if [ "$vuln" != "GO-2025-3443" ]; then + echo "Found vulnerability $vuln, failing..." + exit 1 + fi + done + + echo "Only known vulnerability (GO-2025-3443) present. Continuing." diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000..ee3d198 --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,73 @@ +name: Build & Push +# Build & Push builds the simapp docker image on every push to main and +# and pushes the image to https://ghcr.io/cosmos/simapp +on: + pull_request: + paths: + - "Dockerfile" + push: + branches: + - main + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0.0, v20.15.10 + - "v[0-9]+.[0-9]+.[0-9]+-rc.*" # Push events to matching v*, i.e. v1.0.0-rc.1, v20.15.10-rc.5 + - "v[0-9]+.[0-9]+.[0-9]+-beta.*" # Push events to matching v*, i.e. 
v1.0.0-beta.1, v20.15.10-beta.5 + workflow_dispatch: + inputs: + tags: + description: "SDK version (e.g 0.47.1)" + required: true + type: string + +permissions: + contents: read + packages: write + id-token: write + +env: + # Use docker.io for Docker Hub if empty + REGISTRY: ghcr.io + # github.repository as / + IMAGE_NAME: cosmos/simapp + +jobs: + build: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Extract Docker metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: | + type=raw,value=latest,enable={{is_default_branch}} + type=semver,pattern=v{{major}}.{{minor}} + type=semver,pattern={{version}},value=v${{ inputs.tags }},enable=${{ inputs.tags != '' }} + flavor: | + latest=false + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log into registry ${{ env.REGISTRY }} + if: ${{ github.event_name != 'pull_request' }} + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Depot + uses: depot/setup-action@v1 + + - name: Publish to GitHub Packages + uses: depot/build-push-action@v1 + with: + project: gnm1jqptpw + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} diff --git a/.github/workflows/gosec.yml b/.github/workflows/gosec.yml new file mode 100644 index 0000000..1694d52 --- /dev/null +++ b/.github/workflows/gosec.yml @@ -0,0 +1,36 @@ +name: Run Gosec +on: + pull_request: + paths: + - "**/*.go" + - "go.mod" + - "go.sum" + push: + branches: + - main + paths: + - "**/*.go" + - "go.mod" + - "go.sum" + +jobs: + Gosec: + permissions: + security-events: write + + runs-on: ubuntu-latest + env: + GO111MODULE: on + steps: + - name: Checkout Source + uses: actions/checkout@v4 + + - name: Run Gosec Security Scanner + uses: 
securego/gosec@master + with: + args: "-exclude=G101,G107 -exclude-generated -no-fail -fmt sarif -out results.sarif ./..." + + - name: Upload SARIF file + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: results.sarif diff --git a/.github/workflows/issue_labeler.yml b/.github/workflows/issue_labeler.yml new file mode 100644 index 0000000..a2b1cf9 --- /dev/null +++ b/.github/workflows/issue_labeler.yml @@ -0,0 +1,15 @@ +name: "Issue Labeler" +on: + issues: + types: [opened] + +jobs: + triage: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: github/issue-labeler@v3.4 + if: join(github.event.issue.labels) == '' + with: + repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: .github/issue_labeler.yml + enable-versioned-regex: 0 diff --git a/.github/workflows/issues.yml b/.github/workflows/issues.yml new file mode 100644 index 0000000..1641268 --- /dev/null +++ b/.github/workflows/issues.yml @@ -0,0 +1,18 @@ +name: Add Sprint issues to Cosmos SDK Project + +on: + issues: + types: + - opened + - labeled + +jobs: + add-to-project: + name: Add issue to project + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/add-to-project@v1.0.2 + with: + project-url: https://github.com/orgs/cosmos/projects/26 +# add all issues opened to the issue board for triage and assignment + github-token: ${{ secrets.PERSONAL_TOKEN }} diff --git a/.github/workflows/lint-pr.yml b/.github/workflows/lint-pr.yml new file mode 100644 index 0000000..46a4353 --- /dev/null +++ b/.github/workflows/lint-pr.yml @@ -0,0 +1,47 @@ +name: "Lint PR" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +permissions: + contents: read + +jobs: + main: + permissions: + pull-requests: read # for amannn/action-semantic-pull-request to analyze PRs + statuses: write # for amannn/action-semantic-pull-request to mark status of analyzed PR + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: amannn/action-semantic-pull-request@v5.5.3 + id: lint_pr_title + env: + 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - uses: marocchino/sticky-pull-request-comment@v2 + # When the previous steps fails, the workflow would stop. By adding this + # condition you can continue the execution with the populated error message. + if: always() && (steps.lint_pr_title.outputs.error_message != null) + with: + header: pr-title-lint-error + message: | + Hey there and thank you for opening this pull request! 👋🏼 + + We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted. + + Details: + + ``` + ${{ steps.lint_pr_title.outputs.error_message }} + ``` + + # Delete a previous comment when the issue has been resolved + - if: ${{ steps.lint_pr_title.outputs.error_message == null }} + uses: marocchino/sticky-pull-request-comment@v2 + with: + header: pr-title-lint-error + delete: true diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000..ca7c8cb --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,47 @@ +name: Lint +on: + push: + branches: + - main + - release/** + pull_request: + merge_group: +permissions: + contents: read + +jobs: + golangci: + name: golangci-lint + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.mk + Makefile + **/Makefile + .golangci.yml + - name: run linting (long) + if: env.GIT_DIFF + id: lint_long + run: | + make lint + - uses: technote-space/get-diff-action@v6.1.2 + if: steps.lint_long.outcome == 'skipped' + with: + PATTERNS: | + **/*.go + *.go + - name: run linting (short) + if: steps.lint_long.outcome == 'skipped' && env.GIT_DIFF + run: | + make lint + env: + GIT_DIFF: ${{ env.GIT_DIFF }} + LINT_DIFF: 1 diff --git a/.github/workflows/md-link-checker.yml 
b/.github/workflows/md-link-checker.yml new file mode 100644 index 0000000..eec34c6 --- /dev/null +++ b/.github/workflows/md-link-checker.yml @@ -0,0 +1,31 @@ +name: Check Markdown links +on: + pull_request: + paths: + - "docs/**" +jobs: + markdown-link-check: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - run: cd docs && sh ./pre.sh + - uses: gaurav-nelson/github-action-markdown-link-check@1.0.17 + with: + folder-path: "docs" + - run: cd docs && sh ./post.sh + sims-notify-failure: + permissions: + contents: none + runs-on: depot-ubuntu-22.04-4 + if: ${{ failure() }} + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Broken Links + SLACK_ICON_EMOJI: ":skull:" + SLACK_COLOR: danger + SLACK_MESSAGE: Links are broken in docs + SLACK_FOOTER: "" diff --git a/.github/workflows/pr-go-mod-tidy-mocks.yml b/.github/workflows/pr-go-mod-tidy-mocks.yml new file mode 100644 index 0000000..1eb8dad --- /dev/null +++ b/.github/workflows/pr-go-mod-tidy-mocks.yml @@ -0,0 +1,52 @@ +name: "Checks dependencies and mocks generation" +on: + merge_group: + pull_request: + push: + branches: + - main + +concurrency: + group: ci-${{ github.ref }}-pr-go-mod-tidy-mocks + cancel-in-progress: true + +jobs: + go-mod-tidy: + name: Check go mod tidy + runs-on: depot-ubuntu-22.04-4 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Run go mod tidy + run: ./scripts/go-mod-tidy-all.sh + - name: Check for diffs + run: | + git diff --exit-code || { + echo "Please run './scripts/go-mod-tidy-all.sh' and commit the changes"; + exit 1; + } + + generate-mocks: + name: Check up to date mocks + runs-on: depot-ubuntu-22.04-4 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v5 + 
with: + go-version: "1.23" + check-latest: true + - name: Generate mocks + run: make mocks + - name: Check for diffs + run: | + git diff --exit-code || { + echo "Please run 'make mocks' and commit the changes"; + exit 1; + } diff --git a/.github/workflows/pr_labeler.yml b/.github/workflows/pr_labeler.yml new file mode 100644 index 0000000..6c1c694 --- /dev/null +++ b/.github/workflows/pr_labeler.yml @@ -0,0 +1,18 @@ +name: "Pull Request Labeler" +on: + - pull_request_target + +permissions: + contents: read + +jobs: + labeler: + permissions: + contents: read # for actions/labeler to determine modified files + pull-requests: write # for actions/labeler to add labels to PRs + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/labeler@v4 # v5 is broken, ref https://github.com/actions/labeler/issues/712. Do not bump. + with: + configuration-path: .github/pr_labeler.yml + repo-token: "${{ secrets.GITHUB_TOKEN }}" diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml new file mode 100644 index 0000000..7e2f7d2 --- /dev/null +++ b/.github/workflows/proto-docker.yml @@ -0,0 +1,73 @@ +name: Build & Push SDK Proto Builder +on: + push: + branches: + - main + paths: + - "contrib/devtools/Dockerfile" + workflow_dispatch: + inputs: + tags: + description: "Docker image tags" + required: true + type: string + pull_request: + paths: + - "contrib/devtools/Dockerfile" + +env: + REGISTRY: ghcr.io + IMAGE_NAME: cosmos/proto-builder + +# Allow one concurrent deployment +concurrency: + group: "proto-docker" + cancel-in-progress: true + +jobs: + build: + runs-on: depot-ubuntu-22.04-4 + permissions: + contents: read + packages: write + id-token: write + + steps: + # set VERSION to new version when making changes, when merged to main the image will automatically be pushed + - uses: actions/checkout@v4 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract 
metadata (tags, labels) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + # modify value when deploying a new version + tags: | + type=semver,pattern={{version}},value=${{ inputs.tags }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + if: ${{ github.event_name != 'pull_request' }} + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup Depot + uses: depot/setup-action@v1 + + - name: Publish to GHCR + uses: depot/build-push-action@v1 + with: + project: gnm1jqptpw + context: ./contrib/devtools + platforms: linux/amd64,linux/arm64 + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/proto-registry.yml b/.github/workflows/proto-registry.yml new file mode 100644 index 0000000..5e18c13 --- /dev/null +++ b/.github/workflows/proto-registry.yml @@ -0,0 +1,23 @@ +# name: Buf Push +# Protobuf runs buf (https://buf.build/) push updated proto files to https://buf.build/cosmos/cosmos-sdk +# This workflow is only run when a .proto file has been changed +# on: +# push: +# tags: +# - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 +# - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # Push events to matching v*-rc*, i.e. v1.0-rc1, v20.15.10-rc2 +# - "v[0-9]+.[0-9]+.[0-9]+-beta[0-9]+" # Push events to matching v*-beta*, i.e. v1.0-beta1, v20.15.10-beta2 +# - "v[0-9]+.[0-9]+.[0-9]+-alpha[0-9]+" # Push events to matching v*-alpha*, i.e. 
v1.0-alpha1, v20.15.10-alpha2 +# paths: +# - "proto/**" + +# jobs: +# root: +# runs-on: depot-ubuntu-22.04-4 +# name: "Push to buf.build/cosmos/cosmos-sdk" +# steps: +# - uses: actions/checkout@v4 +# - uses: bufbuild/buf-setup-action@v1.50.0 +# - run: buf push proto --tag ${{ github.ref_type == 'tag' && github.ref_name || github.sha }} # https://github.com/bufbuild/buf-push-action/issues/20 + +## TODO at each module tag to their own buf repository diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml new file mode 100644 index 0000000..3d13217 --- /dev/null +++ b/.github/workflows/proto.yml @@ -0,0 +1,31 @@ +name: Protobuf +# Protobuf runs buf (https://buf.build/) lint and check-breakage +# This workflow is only run when a .proto file has been changed +on: + pull_request: + paths: + - "proto/**" + +permissions: + contents: read + +jobs: + lint: + runs-on: depot-ubuntu-22.04-4 + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.50.0 + - uses: bufbuild/buf-lint-action@v1 + with: + input: "proto" + + break-check: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.50.0 + - uses: bufbuild/buf-breaking-action@v1 + with: + input: "proto" + against: "https://github.com/${{ github.repository }}.git#branch=${{ github.event.pull_request.base.ref }},ref=HEAD~1,subdir=proto" diff --git a/.github/workflows/release-confix.yml b/.github/workflows/release-confix.yml new file mode 100644 index 0000000..feed378 --- /dev/null +++ b/.github/workflows/release-confix.yml @@ -0,0 +1,39 @@ +name: Release Confix + +on: + push: + tags: + - "tools/confix/v*.*.*" +permissions: + contents: read + +jobs: + goreleaser: + permissions: + contents: write # for goreleaser/goreleaser-action to create a GitHub release + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + # get 
'v*.*.*' part from 'confix/v*.*.*' and save to $GITHUB_ENV + - name: Set env + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/confix/}" >> $GITHUB_ENV + # remove the possible pre-existing same tag for cosmos-sdk related tags instead of confix tags + # Because goreleaser enforces semantic versioning and will error on non compliant tags.(https://goreleaser.com/limitations/semver/) + - name: Tag without prefix locally to avoid error in goreleaser + run: |- + git tag -d ${{ env.RELEASE_VERSION }} || echo "No such a tag exists before" + git tag ${{ env.RELEASE_VERSION }} HEAD + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v3 + with: + # stick to version v0.179.0(https://github.com/cosmos/cosmos-sdk/issues/11125) + version: v0.179.0 + args: release --rm-dist --skip-validate --release-notes ./RELEASE_NOTES.md + workdir: tools/confix + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GORELEASER_CURRENT_TAG: confix/${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-cosmovisor.yml b/.github/workflows/release-cosmovisor.yml new file mode 100644 index 0000000..2315f00 --- /dev/null +++ b/.github/workflows/release-cosmovisor.yml @@ -0,0 +1,39 @@ +name: Release Cosmovisor + +on: + push: + tags: + - "tools/cosmovisor/v*.*.*" +permissions: + contents: read + +jobs: + goreleaser: + permissions: + contents: write # for goreleaser/goreleaser-action to create a GitHub release + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + # get 'v*.*.*' part from 'cosmovisor/v*.*.*' and save to $GITHUB_ENV + - name: Set env + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/cosmovisor/}" >> $GITHUB_ENV + # remove the possible pre-existing same tag for cosmos-sdk related tags instead of cosmovisor tags + # Because goreleaser enforces semantic versioning and will error on non compliant tags.(https://goreleaser.com/limitations/semver/) + - name: Tag without prefix 
locally to avoid error in goreleaser + run: |- + git tag -d ${{ env.RELEASE_VERSION }} || echo "No such a tag exists before" + git tag ${{ env.RELEASE_VERSION }} HEAD + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v3 + with: + # stick to version v0.179.0(https://github.com/cosmos/cosmos-sdk/issues/11125) + version: v0.179.0 + args: release --rm-dist --skip-validate --release-notes ./RELEASE_NOTES.md + workdir: tools/cosmovisor + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GORELEASER_CURRENT_TAG: cosmovisor/${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release-simd.yml b/.github/workflows/release-simd.yml new file mode 100644 index 0000000..aedc981 --- /dev/null +++ b/.github/workflows/release-simd.yml @@ -0,0 +1,39 @@ +name: Release Simd + +on: + push: + tags: + - "simd/v*.*.*" +permissions: + contents: read + +jobs: + goreleaser: + permissions: + contents: write # for goreleaser/goreleaser-action to create a GitHub release + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + # get 'v*.*.*' part from 'simd/v*.*.*' and save to $GITHUB_ENV + - name: Set env + run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/simd/}" >> $GITHUB_ENV + # remove the possible pre-existing same tag for cosmos-sdk related tags instead of simd tags + # Because goreleaser enforces semantic versioning and will error on non compliant tags.(https://goreleaser.com/limitations/semver/) + - name: Tag without prefix locally to avoid error in goreleaser + run: |- + git tag -d ${{ env.RELEASE_VERSION }} || echo "No such a tag exists before" + git tag ${{ env.RELEASE_VERSION }} HEAD + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v3 + with: + # stick to version v0.179.0(https://github.com/cosmos/cosmos-sdk/issues/11125) + version: v0.179.0 + args: release --rm-dist --skip-validate + workdir: simapp + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + 
GORELEASER_CURRENT_TAG: simd/${{ env.RELEASE_VERSION }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..a4f81f0 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,51 @@ +name: Release +# This workflow helps with creating releases. +# This job will only be triggered when a tag (vX.X.x) is pushed +on: + push: + # Sequence of patterns matched against refs/tags + tags: + - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 + +permissions: + contents: read + +jobs: + release: + permissions: + contents: write # for goreleaser/goreleaser-action to create a GitHub release + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Unshallow + run: git fetch --prune --unshallow + - name: Create release + uses: goreleaser/goreleaser-action@v3 + with: + args: release --clean --release-notes ./RELEASE_NOTES.md + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + release-success: + needs: release + if: ${{ success() }} + runs-on: depot-ubuntu-22.04-4 + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: cosmos-tech + SLACK_USERNAME: Cosmos SDK Release Bot + SLACK_ICON: https://avatars.githubusercontent.com/t/5997665?size=64 + SLACK_COLOR: good + SLACK_TITLE: "Cosmos SDK ${{ github.ref_name }} is tagged :tada:" + SLACK_MESSAGE: "@channel :point_right: https://github.com/cosmos/cosmos-sdk/releases/tag/${{ github.ref_name }}" + SLACK_FOOTER: "" + SLACK_LINK_NAMES: true + MSG_MINIMAL: true diff --git a/.github/workflows/sims-047.yml b/.github/workflows/sims-047.yml new file mode 100644 index 0000000..7678736 --- /dev/null +++ b/.github/workflows/sims-047.yml @@ -0,0 +1,145 @@ +name: Sims release/0.47.x +# Sims workflow runs multiple types of simulations 
(nondeterminism, import-export, after-import, multi-seed-short) +# This workflow will run on all Pull Requests, if a .go, .mod or .sum file have been changed +on: + schedule: + - cron: "0 0,12 * * *" + release: + types: [published] + +concurrency: + group: ci-${{ github.ref }}-sims-047 + cancel-in-progress: true + +jobs: + build: + runs-on: ubuntu-latest + if: "!contains(github.event.head_commit.message, 'skip-sims')" + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.47.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - run: make build + + install-runsim: + permissions: + contents: none + runs-on: ubuntu-latest + needs: build + steps: + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Install runsim + run: go install github.com/cosmos/tools/cmd/runsim@v1.0.0 + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + + test-sim-import-export: + runs-on: ubuntu-latest + needs: [build, install-runsim] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.47.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-import-export + run: | + make test-sim-import-export + + test-sim-after-import: + runs-on: ubuntu-latest + needs: [build, install-runsim] + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.47.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-after-import + run: | + make test-sim-after-import + + test-sim-multi-seed-short: + runs-on: ubuntu-latest + needs: [build, install-runsim] + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.47.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + 
check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-multi-seed-short + run: | + make test-sim-multi-seed-short + + sims-notify-success: + needs: + [test-sim-multi-seed-short, test-sim-after-import, test-sim-import-export] + runs-on: ubuntu-latest + if: ${{ success() }} + steps: + - uses: actions/checkout@v4 + - name: Get previous workflow status + uses: ./.github/actions/last-workflow-status + id: last_status + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Notify Slack on success + if: ${{ steps.last_status.outputs.last_status == 'failure' }} + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests release/0.47.x + SLACK_ICON_EMOJI: ":white_check_mark:" + SLACK_COLOR: good + SLACK_MESSAGE: 0.47.x Sims are passing + SLACK_FOOTER: "" + + sims-notify-failure: + permissions: + contents: none + needs: + [test-sim-multi-seed-short, test-sim-after-import, test-sim-import-export] + runs-on: ubuntu-latest + if: ${{ failure() }} + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests release/0.47.x + SLACK_ICON_EMOJI: ":skull:" + SLACK_COLOR: danger + SLACK_MESSAGE: 0.47.x Sims are failing + SLACK_FOOTER: "" diff --git a/.github/workflows/sims-050.yml b/.github/workflows/sims-050.yml new file mode 100644 index 0000000..c1f49f5 --- /dev/null +++ b/.github/workflows/sims-050.yml @@ -0,0 +1,148 @@ +name: Sims release/0.50.x +# Sims workflow runs multiple types of simulations (nondeterminism, import-export, after-import, multi-seed-short) +# This workflow will run on all Pull Requests, if a .go, .mod or .sum file have been changed + +# TODO - update to 53 once we have cut release + +on: + schedule: + - cron: "0 0,12 * * *" + release: + types: [published] + 
+concurrency: + group: ci-${{ github.ref }}-sims-050 + cancel-in-progress: true + +jobs: + build: + runs-on: depot-ubuntu-22.04-4 + if: "!contains(github.event.head_commit.message, 'skip-sims')" + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.50.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - run: make build + + install-runsim: + permissions: + contents: none + runs-on: depot-ubuntu-22.04-4 + needs: build + steps: + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Install runsim + run: go install github.com/cosmos/tools/cmd/runsim@v1.0.0 + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + + test-sim-import-export: + runs-on: depot-ubuntu-22.04-4 + needs: [build, install-runsim] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.50.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-import-export + run: | + make test-sim-import-export + + test-sim-after-import: + runs-on: depot-ubuntu-22.04-4 + needs: [build, install-runsim] + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.50.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-after-import + run: | + make test-sim-after-import + + test-sim-multi-seed-short: + runs-on: depot-ubuntu-22.04-4 + needs: [build, install-runsim] + steps: + - uses: actions/checkout@v4 + with: + ref: "release/v0.50.x" + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - uses: actions/cache@v4 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-runsim-binary + - name: test-sim-multi-seed-short + run: | + make test-sim-multi-seed-short + + 
sims-notify-success: + needs: + [test-sim-multi-seed-short, test-sim-after-import, test-sim-import-export] + runs-on: depot-ubuntu-22.04-4 + if: ${{ success() }} + steps: + - uses: actions/checkout@v4 + - name: Get previous workflow status + uses: ./.github/actions/last-workflow-status + id: last_status + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Notify Slack on success + if: ${{ steps.last_status.outputs.last_status == 'failure' }} + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests release/0.50.x + SLACK_ICON_EMOJI: ":white_check_mark:" + SLACK_COLOR: good + SLACK_MESSAGE: 0.50.x Sims are passing + SLACK_FOOTER: "" + + sims-notify-failure: + permissions: + contents: none + needs: + [test-sim-multi-seed-short, test-sim-after-import, test-sim-import-export] + runs-on: depot-ubuntu-22.04-4 + if: ${{ failure() }} + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests release/0.50.x + SLACK_ICON_EMOJI: ":skull:" + SLACK_COLOR: danger + SLACK_MESSAGE: 0.50.x Sims are failing + SLACK_FOOTER: "" diff --git a/.github/workflows/sims-nightly.yml b/.github/workflows/sims-nightly.yml new file mode 100644 index 0000000..c639ea7 --- /dev/null +++ b/.github/workflows/sims-nightly.yml @@ -0,0 +1,88 @@ +name: Sims Nightly (Long) +# Release Sims workflow runs long-lived (multi-seed & large block size) simulations +# This workflow only runs nightly at 8am UTC and on releases +on: + schedule: + - cron: "0 8 * * *" + release: + types: [published] + +permissions: + contents: read + +concurrency: + group: ci-${{ github.ref }}-sims-nightly-long + cancel-in-progress: true + +jobs: + test-sim-multi-seed-long: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + 
check-latest: true + - name: test-sim-multi-seed-long + env: + GOMEMLIMIT: 14GiB # reserve 2 GiB as buffer for GC to avoid OOM + run: | + make test-sim-multi-seed-long + + test-sim-import-export: + runs-on: depot-ubuntu-22.04-4 + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: test-sim-import-export + env: + GOMEMLIMIT: 14GiB # reserve 2 GiB as buffer for GC to avoid OOM + run: | + make test-sim-import-export + + sims-notify-success: + needs: [test-sim-multi-seed-long, test-sim-import-export] + runs-on: depot-ubuntu-22.04-4 + if: ${{ success() }} + steps: + - name: Check out repository + uses: actions/checkout@v4 + - name: Get previous workflow status + uses: ./.github/actions/last-workflow-status + id: last_status + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Notify Slack on success + if: ${{ steps.last_status.outputs.last_status == 'failure' }} + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests + SLACK_ICON_EMOJI: ":white_check_mark:" + SLACK_COLOR: good + SLACK_MESSAGE: Sims Nightly (Long) are passing + SLACK_FOOTER: "" + + sims-notify-failure: + permissions: + contents: none + needs: [test-sim-multi-seed-long] + runs-on: depot-ubuntu-22.04-4 + if: ${{ failure() }} + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@v2.3.3 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: sdk-sims + SLACK_USERNAME: Sim Tests + SLACK_ICON_EMOJI: ":skull:" + SLACK_COLOR: danger + SLACK_MESSAGE: Sims Nightly (Long) are failing + SLACK_FOOTER: "" diff --git a/.github/workflows/sims.yml b/.github/workflows/sims.yml new file mode 100644 index 0000000..955fe0a --- /dev/null +++ b/.github/workflows/sims.yml @@ -0,0 +1,82 @@ +name: Sims +# Sims workflow runs multiple types of simulations (nondeterminism, import-export, 
after-import, multi-seed-short) +# This workflow runs every two hours and on each published release +on: + schedule: + - cron: "0 */2 * * *" + release: + types: [published] + +concurrency: + group: ci-${{ github.ref }}-sims + cancel-in-progress: true + +jobs: + build: + permissions: + contents: read # for actions/checkout to fetch code + runs-on: depot-ubuntu-22.04-16 + if: "!contains(github.event.head_commit.message, 'skip-sims')" + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - run: make build + + test-sim-import-export: + runs-on: depot-ubuntu-22.04-16 + needs: [build] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: test-sim-import-export + run: | + make test-sim-import-export + + test-sim-after-import: + runs-on: depot-ubuntu-22.04-16 + needs: [build] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: test-sim-after-import + run: | + make test-sim-after-import + + test-sim-deterministic: + runs-on: depot-ubuntu-22.04-16 + needs: [build] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: test-sim-nondeterminism-streaming + run: | + make test-sim-nondeterminism-streaming + + test-sim-multi-seed-short: + runs-on: depot-ubuntu-22.04-16 + needs: [build] + timeout-minutes: 60 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: test-sim-multi-seed-short + run: | + make test-sim-multi-seed-short diff --git a/.github/workflows/systemtests.yml b/.github/workflows/systemtests.yml new file mode 100644 index 0000000..bd92e16 --- /dev/null +++ b/.github/workflows/systemtests.yml @@ -0,0 +1,63 @@ +name: 
System Tests +on: + pull_request: + merge_group: + push: + branches: + - main + - release/v0.53.x + +permissions: + contents: read + +concurrency: + group: ci-${{ github.ref }}-system-tests + cancel-in-progress: true + +jobs: + setup: + runs-on: depot-ubuntu-22.04-4 + outputs: + git_diff: ${{ steps.git_diff.outputs.diff }} + steps: + - uses: actions/checkout@v4 + with: + fetch-tags: true + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: | + ./go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + **/Makefile + Makefile + + test-system: + needs: setup + if: needs.setup.outputs.git_diff + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - name: Run system tests + run: | + make test-system + + test-system-legacy: + needs: setup + if: needs.setup.outputs.git_diff + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - name: Run legacy system tests + run: | + COSMOS_BUILD_OPTIONS=legacy make test-system diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..591b24e --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,640 @@ +name: Tests / Code Coverage +on: + pull_request: + merge_group: + push: + branches: + - main + +permissions: + contents: write + pull-requests: write + +concurrency: + group: ci-${{ github.ref }}-tests + cancel-in-progress: true + +jobs: + split-test-files: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + - name: Create a file with all core Cosmos SDK pkgs + run: go list ./... > pkgs.txt + - name: Split pkgs into 4 files + run: split -d -n l/4 pkgs.txt pkgs.txt.part. 
+ - uses: actions/upload-artifact@v4 + with: + name: "${{ github.sha }}-00" + path: ./pkgs.txt.part.00 + - uses: actions/upload-artifact@v4 + with: + name: "${{ github.sha }}-01" + path: ./pkgs.txt.part.01 + - uses: actions/upload-artifact@v4 + with: + name: "${{ github.sha }}-02" + path: ./pkgs.txt.part.02 + - uses: actions/upload-artifact@v4 + with: + name: "${{ github.sha }}-03" + path: ./pkgs.txt.part.03 + + tests: + runs-on: depot-ubuntu-22.04-4 + needs: split-test-files + strategy: + fail-fast: false + matrix: + part: ["00", "01", "02", "03"] + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + **/Makefile + Makefile + - uses: actions/download-artifact@v4 + with: + name: "${{ github.sha }}-${{ matrix.part }}" + - name: test & coverage report creation + if: env.GIT_DIFF + run: | + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -race -timeout 30m -coverprofile=${{ matrix.part }}profile.out -covermode=atomic -tags='ledger test_ledger_mock' + - uses: actions/upload-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-${{ matrix.part }}-coverage" + path: ./${{ matrix.part }}profile.out + + test-integration: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + **/Makefile + Makefile + - name: integration tests + if: env.GIT_DIFF + run: | + make test-integration-cov + - uses: actions/upload-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-integration-coverage" + path: 
./tests/integration-profile.out + + test-e2e: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.21" + check-latest: true + cache: true + cache-dependency-path: go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + **/Makefile + Makefile + - name: e2e tests + if: env.GIT_DIFF + run: | + make test-e2e-cov + - uses: actions/upload-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-e2e-coverage" + path: ./tests/e2e-profile.out + + repo-analysis: + runs-on: depot-ubuntu-22.04-4 + needs: [tests, test-integration, test-e2e] + steps: + - uses: actions/checkout@v4 + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-00-coverage" + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-01-coverage" + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-02-coverage" + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-03-coverage" + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-integration-coverage" + - uses: actions/download-artifact@v4 + if: env.GIT_DIFF + with: + name: "${{ github.sha }}-e2e-coverage" + continue-on-error: true + + test-sim-nondeterminism: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + **/*.go + go.mod + go.sum + **/go.mod + **/go.sum + **/Makefile + Makefile + - name: test-sim-nondeterminism + if: env.GIT_DIFF + run: | + 
make test-sim-nondeterminism + + ############################### + #### Cosmos SDK Submodules #### + ############################### + + # NOTE: The following jobs are used to test the Cosmos SDK Go submodules. + # They run when there is a diff in their respective directories. + + test-clientv2: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: client/v2/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + client/v2/**/*.go + client/v2/go.mod + client/v2/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd client/v2 + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-core: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: core/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + core/**/*.go + core/go.mod + core/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd core + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-depinject: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.22" + check-latest: false + cache: true + cache-dependency-path: depinject/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + depinject/**/*.go + depinject/go.mod + depinject/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd depinject + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... 
+ + test-errors: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.22" + check-latest: true + cache: true + cache-dependency-path: errors/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + errors/**/*.go + errors/go.mod + errors/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd errors + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-math: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.22" + check-latest: true + cache: true + cache-dependency-path: math/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + math/**/*.go + math/go.mod + math/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd math + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-schema: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.12" + cache: true + cache-dependency-path: schema/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + schema/**/*.go + schema/go.mod + schema/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd schema + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... 
+ + test-collections: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: collections/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + collections/**/*.go + collections/go.mod + collections/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd collections + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-cosmovisor: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: tools/cosmovisor/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + tools/cosmovisor/**/*.go + tools/cosmovisor/go.mod + tools/cosmovisor/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd tools/cosmovisor + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-confix: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: tools/confix/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + tools/confix/**/*.go + tools/confix/go.mod + tools/confix/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd tools/confix + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... 
+ + test-store: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: store/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + store/**/*.go + store/go.mod + store/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd store + (cd streaming/abci/examples/file && go build .) + go test -ldflags "-r /usr/local/lib" -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-log: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.21" + check-latest: true + cache: true + cache-dependency-path: log/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + log/*.go + log/go.mod + log/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd log + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + ############################# + ### Cosmos SDK x/{module} ### + ############################# + + # NOTE: The following jobs are used to test the Cosmos SDK Go submodules present under x/{module}. + # They run when there is a diff in their respective directories. + + test-x-tx: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/tx/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/tx/**/*.go + x/tx/go.mod + x/tx/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/tx + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... 
+ + test-x-nft: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/nft/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/nft/**/*.go + x/nft/go.mod + x/nft/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/nft + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-x-circuit: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/circuit/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/circuit/**/*.go + x/circuit/go.mod + x/circuit/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/circuit + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-x-feegrant: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/feegrant/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/feegrant/**/*.go + x/feegrant/go.mod + x/feegrant/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/feegrant + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... 
+ + test-x-evidence: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/evidence/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/evidence/**/*.go + x/evidence/go.mod + x/evidence/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/evidence + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-x-upgrade: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23" + check-latest: true + cache: true + cache-dependency-path: x/upgrade/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + x/upgrade/**/*.go + x/upgrade/go.mod + x/upgrade/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd x/upgrade + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace ledger test_ledger_mock' ./... + + test-tools-benchmark: + runs-on: depot-ubuntu-22.04-4 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: "1.23.2" + check-latest: true + cache: true + cache-dependency-path: tools/benchmark/go.sum + - uses: technote-space/get-diff-action@v6.1.2 + id: git_diff + with: + PATTERNS: | + tools/benchmark/**/*.go + tools/benchmark/go.mod + tools/benchmark/go.sum + - name: tests + if: env.GIT_DIFF + run: | + cd tools/benchmark + go test -mod=readonly -timeout 30m -coverprofile=coverage.out -covermode=atomic -tags='norace' ./... 
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9bf4d6f --- /dev/null +++ b/.gitignore @@ -0,0 +1,67 @@ +# OS +.DS_Store +*.swp +*.swo +*.swl +*.swm +*.swn +*.pyc + +# private files +private[.-]* +private + +# Build +vendor +build +dist +tools-stamp +buf-stamp +artifacts +simapp/simd/simd + +# Go +go.work +go.work.sum + +# Data - ideally these don't exist +baseapp/data/* +client/lcd/keys/* +.testnets + +# Testing +coverage.out +coverage.txt +*profile.out +sim_log_file +x/genutil/config +x/genutil/data +*.fail + +# Vagrant +.vagrant/ +*.box +*.log +vagrant + +# IDE +.idea +*.iml +*.ipr +*.iws +.dir-locals.el +.vscode + +.venv + +# Depinject & Graphviz +dependency-graph.png +debug_container.dot +debug_container.log + +# Latex +*.aux +*.out +*.synctex.gz +/x/genutil/config/priv_validator_key.json +/x/genutil/data/priv_validator_state.json diff --git a/.gitpod.yml b/.gitpod.yml new file mode 100644 index 0000000..1fde50f --- /dev/null +++ b/.gitpod.yml @@ -0,0 +1 @@ +image: ghcr.io/notional-labs/cosmos diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..e5592dc --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,144 @@ +version: "2" +run: + build-tags: + - e2e + - ledger + - test_ledger_mock + - sims + tests: true + allow-parallel-runners: true +linters: + default: none + enable: + - copyloopvar + - dogsled + - errcheck + - errorlint + - goconst + - gocritic + - gosec + - govet + - ineffassign + - misspell + - nakedret + - nolintlint + - revive + - staticcheck + - thelper + - unconvert + - unused + settings: + dogsled: + max-blank-identifiers: 6 + gocritic: + disabled-checks: + - regexpMust + - appendAssign + - ifElseChain + gosec: + excludes: + - G101 + - G107 + - G404 + confidence: medium + misspell: + locale: US + nolintlint: + require-explanation: true + require-specific: false + allow-unused: false + revive: + rules: + - name: redefines-builtin-id + disabled: true + staticcheck: + checks: + - all + unused: + 
local-variables-are-used: false + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - linters: + - staticcheck + text: 'ST1003:' + - linters: + - staticcheck + text: 'ST1016:' + - linters: + - staticcheck + path: migrations + text: 'SA1019:' + - linters: + - staticcheck + text: 'SA1019: codec.NewAminoCodec is deprecated' + - linters: + - staticcheck + text: 'SA1019: legacybech32.MustMarshalPubKey' + - linters: + - staticcheck + text: 'SA1019: legacybech32.MarshalPubKey' + - linters: + - staticcheck + text: 'SA1019: legacybech32.UnmarshalPubKey' + - linters: + - staticcheck + text: 'SA1019: params.SendEnabled is deprecated' + - linters: + - gosec + text: 'G115: integer overflow conversion' + - linters: + - nolintlint + text: leading space + paths: + - server/grpc/gogoreflection/fix_registration.go + - fix_registration.go + - .*\.pb\.go$ + - .*\.pb\.gw\.go$ + - .*\.pulsar\.go$ + - crypto/keys/secp256k1/internal/* + - types/coin_regex.go + - testutil/testdata + - x/params + - x/crisis + - third_party$ + - builtin$ + - examples$ +issues: + max-issues-per-linter: 10000 + max-same-issues: 10000 +formatters: + enable: + - gci + - gofumpt + settings: + gci: + sections: + - standard + - default + - prefix(cosmossdk.io) + - prefix(github.com/cosmos/cosmos-sdk) + custom-order: true + gofumpt: + extra-rules: true + exclusions: + generated: lax + paths: + - server/grpc/gogoreflection/fix_registration.go + - fix_registration.go + - .*\.pb\.go$ + - .*\.pb\.gw\.go$ + - .*\.pulsar\.go$ + - crypto/keys/secp256k1/internal/* + - types/coin_regex.go + - testutil/testdata + - x/params + - x/crisis + - third_party$ + - builtin$ + - examples$ diff --git a/.goreleaser.yml b/.goreleaser.yml new file mode 100644 index 0000000..d3a6e16 --- /dev/null +++ b/.goreleaser.yml @@ -0,0 +1,27 @@ +--- +project_name: cosmos-sdk + +release: + github: + owner: cosmos + name: cosmos-sdk + +builds: + - skip: true + +archives: + - 
format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + name_template: "{{ .Binary }}-{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" + files: + - LICENSE + - README.md + +snapshot: + name_template: SNAPSHOT-{{ .Commit }} + +changelog: + disable: false diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 0000000..1327171 --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,12 @@ +{ + "default": true, + "MD004": {"style": "asterisk"}, + "MD007": { "indent": 4 }, + "MD013": false, + "MD024": { "siblings_only": true }, + "MD025": false, + "MD033": false, + "MD034": false, + "no-hard-tabs": false, + "whitespace": false +} \ No newline at end of file diff --git a/.markdownlintignore b/.markdownlintignore new file mode 100644 index 0000000..b554d03 --- /dev/null +++ b/.markdownlintignore @@ -0,0 +1,2 @@ +docs/node_modules +/README.md \ No newline at end of file diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 0000000..cd05536 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,58 @@ +queue_rules: + - name: default + queue_conditions: + - "#approved-reviews-by>1" + - base=main + - label=A:automerge + merge_conditions: + - "#approved-reviews-by>1" + commit_message_template: | + {{ title }} (#{{ number }}) + {{ body }} + merge_method: squash + +pull_request_rules: + - name: backport patches to v0.53.x branch + conditions: + - base=main + - label=backport/v0.53.x + actions: + backport: + branches: + - release/v0.53.x + - name: backport patches to v0.50.x branch + conditions: + - base=main + - label=backport/v0.50.x + actions: + backport: + branches: + - release/v0.50.x + - name: backport patches to v0.47.x branch + conditions: + - base=main + - label=backport/v0.47.x + actions: + backport: + branches: + - release/v0.47.x + - name: backport patches to v0.46.x branch + conditions: + - base=main + - label=backport/0.46.x + actions: + backport: + branches: + - release/v0.46.x + - 
name: backport patches to v0.45.x branch + conditions: + - base=main + - label=backport/0.45.x + actions: + backport: + branches: + - release/v0.45.x + - name: automerge to main with label automerge and branch protection passing + conditions: [] + actions: + queue: \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..54971c3 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,736 @@ + + +# Changelog + +## [v0.53.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.6) - 2026-02-10 + +### Improvements + +* (deps) [#25710](https://github.com/cosmos/cosmos-sdk/pull/25710) Bump github.com/cosmos/ledger-cosmos-go from 0.16.0 to 1.0.0 +* (deps) [#25820](https://github.com/cosmos/cosmos-sdk/pull/25820) Bump github.com/cometbft/cometbft from 0.38.20 to 0.38.21 + +### Bug Fixes + +* (x/auth) [#25871](https://github.com/cosmos/cosmos-sdk/pull/25871) Limits pagination at default for values that exceed it. +* (events) [#25881](https://github.com/cosmos/cosmos-sdk/pull/25881) Add `OverrideEvents` to `EventManagerI`. + +## [v0.53.5](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.5) - 2025-12-12 + +### Features + +* (crypto/ledger) [#25435](https://github.com/cosmos/cosmos-sdk/pull/25435) Add SetDERConversion to reset skipDERConversion and App name for ledger. +* (gRPC) [#25565](https://github.com/cosmos/cosmos-sdk/pull/25565) Support for multi gRPC query clients serve with historical binaries to serve proper historical state. +* (blockstm) [#25600](https://github.com/cosmos/cosmos-sdk/pull/25600) Allow dynamic retrieval of the coin denomination from multi store at runtime. +* [#25516](https://github.com/cosmos/cosmos-sdk/pull/25516) Support automatic configuration of OpenTelemetry via [OpenTelemetry declarative configuration](https://pkg.go.dev/go.opentelemetry.io/contrib/otelconf) and add OpenTelemetry instrumentation of `BaseApp`. 
+ +### Improvements + +* (x/mint) [#25562](https://github.com/cosmos/cosmos-sdk/pull/25562) Improve and test `x/mint` params validation. +* (server) [#25632](https://github.com/cosmos/cosmos-sdk/pull/25632) Add missing call to close the app on shutdown. + +### Bug Fixes + +* (cli) [#25485](https://github.com/cosmos/cosmos-sdk/pull/25485) Avoid failed to convert address field in `withdraw-validator-commission` cmd. +* (baseapp) [#25642](https://github.com/cosmos/cosmos-sdk/pull/25642) Mark pre-block events for indexing based on local configuration. + +### Deprecated + +* (x/nft) [#24575](https://github.com/cosmos/cosmos-sdk/pull/24575) Deprecate the `x/nft` module in the Cosmos SDK repository. This module will not be maintained to the extent that our core modules will and will be kept in a [legacy repo](https://github.com/cosmos/cosmos-legacy). +* (x/group) [#24571](https://github.com/cosmos/cosmos-sdk/pull/24571) Deprecate the `x/group` module in the Cosmos SDK repository. This module will not be maintained to the extent that our core modules will and will be kept in a [legacy repo](https://github.com/cosmos/cosmos-legacy). +* (types) [#24664](https://github.com/cosmos/cosmos-sdk/pull/24664) Deprecate the `Invariant` type in the Cosmos SDK. +* [#25516](https://github.com/cosmos/cosmos-sdk/pull/25516) Deprecate all existing methods and types in the `telemetry` package, usage of `github.com/hashicorp/go-metrics` and the `telemetry` configuration section. New instrumentation should use the official [OpenTelemetry go API](https://pkg.go.dev/go.opentelemetry.io/otel) and Cosmos SDK applications can automatically expose OpenTelemetry metrics, traces and logs via [OpenTelemetry declarative configuration](https://pkg.go.dev/go.opentelemetry.io/contrib/otelconf). + +## [v0.53.4](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.4) - 2025-07-25 + +This patch update also includes minor dependency bumps. 
+ +### Features + +* (abci_utils) [#25008](https://github.com/cosmos/cosmos-sdk/pull/25008) add the ability to assign a custom signer extraction adapter in `DefaultProposalHandler`. + +## [v0.53.3](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.3) - 2025-07-08 + +### Bug Fixes + +* [GHSA-p22h-3m2v-cmgh](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-p22h-3m2v-cmgh) Fix x/distribution can halt when historical rewards overflow. + + +## [v0.53.2](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.2) - 2025-06-02 + +This patch update also includes minor dependency bumps. + +### Bug Fixes + +* (x/epochs) [#24770](https://github.com/cosmos/cosmos-sdk/pull/24770) Fix register of epoch hooks in `InvokeSetHooks`. + +## [v0.53.0](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.53.0) - 2025-04-29 + +### Features + +* (simsx) [#24062](https://github.com/cosmos/cosmos-sdk/pull/24062) [#24145](https://github.com/cosmos/cosmos-sdk/pull/24145) Add new simsx framework on top of simulations for better module dev experience. +* (baseapp) [#24069](https://github.com/cosmos/cosmos-sdk/pull/24069) Create CheckTxHandler to allow extending the logic of CheckTx. +* (types) [#24093](https://github.com/cosmos/cosmos-sdk/pull/24093) Added a new method, `IsGT`, for `types.Coin`. This method is used to check if a `types.Coin` is greater than another `types.Coin`. +* (client/keys) [#24071](https://github.com/cosmos/cosmos-sdk/pull/24071) Add support for importing hex key using standard input. +* (types) [#23780](https://github.com/cosmos/cosmos-sdk/pull/23780) Add a ValueCodec for the math.Uint type that can be used in collections maps. +* (perf)[#24045](https://github.com/cosmos/cosmos-sdk/pull/24045) Sims: Replace runsim command with Go stdlib testing. 
CLI: `Commit` default true, `Lean`, `SimulateEveryOperation`, `PrintAllInvariants`, `DBBackend` params removed +* (crypto/keyring) [#24040](https://github.com/cosmos/cosmos-sdk/pull/24040) Expose the db keyring used in the keystore. +* (types) [#23919](https://github.com/cosmos/cosmos-sdk/pull/23919) Add MustValAddressFromBech32 function. +* (all) [#23708](https://github.com/cosmos/cosmos-sdk/pull/23708) Add unordered transaction support. + * Adds a `--timeout-timestamp` flag that allows users to specify a block time at which the unordered transactions should expire from the mempool. +* (x/epochs) [#23815](https://github.com/cosmos/cosmos-sdk/pull/23815) Upstream `x/epochs` from Osmosis +* (client) [#23811](https://github.com/cosmos/cosmos-sdk/pull/23811) Add auto cli for node service. +* (genutil) [#24018](https://github.com/cosmos/cosmos-sdk/pull/24018) Allow manually setting the consensus key type in genesis +* (client) [#18557](https://github.com/cosmos/cosmos-sdk/pull/18557) Add `--qrcode` flag to `keys show` command to support displaying keys address QR code. +* (x/auth) [#24030](https://github.com/cosmos/cosmos-sdk/pull/24030) Allow usage of ed25519 keys for transaction signing. +* (baseapp) [#24163](https://github.com/cosmos/cosmos-sdk/pull/24163) Add `StreamingManager` to baseapp to extend the abci listeners. +* (x/protocolpool) [#23933](https://github.com/cosmos/cosmos-sdk/pull/23933) Add x/protocolpool module. + * x/distribution can now utilize an externally managed community pool. NOTE: this will make the message handlers for FundCommunityPool and CommunityPoolSpend error, as well as the query handler for CommunityPool. +* (client) [#18101](https://github.com/cosmos/cosmos-sdk/pull/18101) Add a `keyring-default-keyname` in `client.toml` for specifying a default key name, and skip the need to use the `--from` flag when signing transactions. 
+* (x/gov) [#24355](https://github.com/cosmos/cosmos-sdk/pull/24355) Allow users to set a custom CalculateVoteResultsAndVotingPower function to be used in govkeeper.Tally. +* (x/mint) [#24436](https://github.com/cosmos/cosmos-sdk/pull/24436) Allow users to set a custom minting function used in the `x/mint` begin blocker. + * The `InflationCalculationFn` argument to `mint.NewAppModule()` is now ignored and must be nil. To set a custom `InflationCalculationFn` on the default minter, use `mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(customInflationFn))`. +* (api) [#24428](https://github.com/cosmos/cosmos-sdk/pull/24428) Add block height to response headers + +### Improvements + +* (client) [#24561](https://github.com/cosmos/cosmos-sdk/pull/24561) TimeoutTimestamp flag has been changed to TimeoutDuration, which now sets the timeout timestamp of unordered transactions to the current time + duration passed. +* (telemetry) [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) Telemetry now includes a pre_blocker metric key. x/upgrade should migrate to this key in v0.54.0. +* (x/auth) [#24541](https://github.com/cosmos/cosmos-sdk/pull/24541) x/auth's PreBlocker now emits telemetry under the pre_blocker metric key. +* (x/bank) [#24431](https://github.com/cosmos/cosmos-sdk/pull/24431) Reduce the number of `ValidateDenom` calls in `bank.SendCoins` and `Coin`. + * The `AmountOf()` method on`sdk.Coins` no longer will `panic` if given an invalid denom and will instead return a zero value. +* (x/staking) [#24391](https://github.com/cosmos/cosmos-sdk/pull/24391) Replace panics with error results; more verbose error messages +* (x/staking) [#24354](https://github.com/cosmos/cosmos-sdk/pull/24354) Optimize validator endblock by reducing bech32 conversions, resulting in significant performance improvement +* (client/keys) [#18950](https://github.com/cosmos/cosmos-sdk/pull/18950) Improve ` keys add`, ` keys import` and ` keys rename` by checking name validation. 
+* (client/keys) [#18703](https://github.com/cosmos/cosmos-sdk/pull/18703) Improve ` keys add` and ` keys show` by checking whether there are duplicate keys in the multisig case. +* (client/keys) [#18745](https://github.com/cosmos/cosmos-sdk/pull/18745) Improve ` keys export` and ` keys mnemonic` by adding --yes option to skip interactive confirmation. +* (x/bank) [#24106](https://github.com/cosmos/cosmos-sdk/pull/24106) `SendCoins` now checks for `SendRestrictions` before instead of after deducting coins using `subUnlockedCoins`. +* (crypto/ledger) [#24036](https://github.com/cosmos/cosmos-sdk/pull/24036) Improve error message when deriving paths using index > 100 +* (gRPC) [#23844](https://github.com/cosmos/cosmos-sdk/pull/23844) Add debug log prints for each gRPC request. +* (gRPC) [#24073](https://github.com/cosmos/cosmos-sdk/pull/24073) Adds error handling for out-of-gas panics in grpc query handlers. +* (server) [#24072](https://github.com/cosmos/cosmos-sdk/pull/24072) Return BlockHeader by shallow copy in server Context. +* (x/bank) [#24053](https://github.com/cosmos/cosmos-sdk/pull/24053) Resolve a foot-gun by swapping send restrictions check in `InputOutputCoins` before coin deduction. +* (codec/types) [#24336](https://github.com/cosmos/cosmos-sdk/pull/24336) Most types definitions were moved to `github.com/cosmos/gogoproto/types/any` with aliases to these left in `codec/types` so that there should be no breakage to existing code. This allows protobuf generated code to optionally reference the SDK's custom `Any` type without a direct dependency on the SDK. This can be done by changing the `protoc` `M` parameter for `any.proto` to `Mgoogle/protobuf/any.proto=github.com/cosmos/gogoproto/types/any`. + +### Bug Fixes + +* (x/gov) [#24460](https://github.com/cosmos/cosmos-sdk/pull/24460) Do not call Remove during Walk in defaultCalculateVoteResultsAndVotingPower. 
+* (baseapp) [#24261](https://github.com/cosmos/cosmos-sdk/pull/24261) Fix post handler error always results in code 1 +* (server) [#24068](https://github.com/cosmos/cosmos-sdk/pull/24068) Allow align block header with skip check header in grpc server. +* (x/gov) [#24044](https://github.com/cosmos/cosmos-sdk/pull/24044) Fix some places in which we call Remove inside a Walk (x/gov). +* (baseapp) [#24042](https://github.com/cosmos/cosmos-sdk/pull/24042) Fixed a data race inside BaseApp.getContext, found by end-to-end (e2e) tests. +* (client/server) [#24059](https://github.com/cosmos/cosmos-sdk/pull/24059) Consistently set viper prefix in client and server. It defaults to the binary name for both client and server. +* (client/keys) [#24041](https://github.com/cosmos/cosmos-sdk/pull/24041) `keys delete` won't terminate when a key is not found, but will log the error. +* (baseapp) [#24027](https://github.com/cosmos/cosmos-sdk/pull/24027) Ensure that `BaseApp.Init` checks that the commit multistore is set to protect against nil dereferences. +* (x/group) [GHSA-47ww-ff84-4jrg](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-47ww-ff84-4jrg) Fix x/group can halt when erroring in EndBlocker +* (x/distribution) [#23934](https://github.com/cosmos/cosmos-sdk/pull/23934) Fix vulnerability in `incrementReferenceCount` in distribution. +* (baseapp) [#23879](https://github.com/cosmos/cosmos-sdk/pull/23879) Ensure finalize block response is not empty in the defer check of FinalizeBlock to avoid panic by nil pointer. +* (query) [#23883](https://github.com/cosmos/cosmos-sdk/pull/23883) Fix NPE in query pagination. +* (client) [#23860](https://github.com/cosmos/cosmos-sdk/pull/23860) Add missing `unordered` field for legacy amino signing of tx body. +* (x/bank) [#23836](https://github.com/cosmos/cosmos-sdk/pull/23836) Fix `DenomMetadata` rpc allow value with slashes. 
+* (query) [87d3a43](https://github.com/cosmos/cosmos-sdk/commit/87d3a432af95f4cf96aa02351ed5fcc51cca6e7b) Fix collection filtered pagination. +* (sims) [#23952](https://github.com/cosmos/cosmos-sdk/pull/23952) Use liveness matrix for validator sign status in sims +* (baseapp) [#24055](https://github.com/cosmos/cosmos-sdk/pull/24055) Align block header when query with latest height. +* (baseapp) [#24074](https://github.com/cosmos/cosmos-sdk/pull/24074) Use CometBFT's ComputeProtoSizeForTxs in defaultTxSelector.SelectTxForProposal for consistency. +* (cli) [#24090](https://github.com/cosmos/cosmos-sdk/pull/24090) Prune cmd should disable async pruning. +* (x/auth) [#19239](https://github.com/cosmos/cosmos-sdk/pull/19239) Sets from flag in multi-sign command to avoid no key name provided error. +* (x/auth) [#23741](https://github.com/cosmos/cosmos-sdk/pull/23741) Support legacy global AccountNumber for legacy compatibility. +* (baseapp) [#24526](https://github.com/cosmos/cosmos-sdk/pull/24526) Fix incorrect retention height when `commitHeight` equals `minRetainBlocks`. +* (x/protocolpool) [#24594](https://github.com/cosmos/cosmos-sdk/pull/24594) Fix NPE when initializing module via depinject. +* (x/epochs) [#24610](https://github.com/cosmos/cosmos-sdk/pull/24610) Fix semantics of `CurrentEpochStartHeight` being set before epoch has started. + +## [v0.50.13](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.13) - 2025-03-12 + +### Bug Fixes + +* [GHSA-47ww-ff84-4jrg](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-47ww-ff84-4jrg) Fix x/group can halt when erroring in EndBlocker + +## [v0.50.12](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.12) - 2025-02-20 + +### Bug Fixes + +* [GHSA-x5vx-95h7-rv4p](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-x5vx-95h7-rv4p) Fix Group module can halt chain when handling a malicious proposal. 
+ +## [v0.50.11](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.11) - 2024-12-16 + +### Features + +* (crypto/keyring) [#21653](https://github.com/cosmos/cosmos-sdk/pull/21653) New Linux-only backend that adds Linux kernel's `keyctl` support. + +### Improvements + +* (server) [#21941](https://github.com/cosmos/cosmos-sdk/pull/21941) Regenerate addrbook.json for in place testnet. + +### Bug Fixes + +* Fix [ABS-0043/ABS-0044](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-8wcc-m6j2-qxvm) Limit recursion depth for unknown field detection and unpack any +* (server) [#22564](https://github.com/cosmos/cosmos-sdk/pull/22564) Fix fallback genesis path in server +* (x/group) [#22425](https://github.com/cosmos/cosmos-sdk/pull/22425) Proper address rendering in error +* (sims) [#21906](https://github.com/cosmos/cosmos-sdk/pull/21906) Skip sims test when running dry on validators +* (cli) [#21919](https://github.com/cosmos/cosmos-sdk/pull/21919) Query address-by-acc-num by account_id instead of id. +* (x/group) [#22229](https://github.com/cosmos/cosmos-sdk/pull/22229) Accept `1` and `try` in CLI for group proposal exec. + +## [v0.50.10](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.10) - 2024-09-20 + +### Features + +* (cli) [#20779](https://github.com/cosmos/cosmos-sdk/pull/20779) Added `module-hash-by-height` command to query and retrieve module hashes at a specified blockchain height, enhancing debugging capabilities. +* (cli) [#21372](https://github.com/cosmos/cosmos-sdk/pull/21372) Added a `bulk-add-genesis-account` genesis command to add many genesis accounts at once. +* (types/collections) [#21724](https://github.com/cosmos/cosmos-sdk/pull/21724) Added `LegacyDec` collection value. + +### Improvements + +* (x/bank) [#21460](https://github.com/cosmos/cosmos-sdk/pull/21460) Added `Sender` attribute in `MsgMultiSend` event. 
+* (genutil) [#21701](https://github.com/cosmos/cosmos-sdk/pull/21701) Improved error messages for genesis validation. +* (testutil/integration) [#21816](https://github.com/cosmos/cosmos-sdk/pull/21816) Allow to pass baseapp options in `NewIntegrationApp`. + +### Bug Fixes + +* (runtime) [#21769](https://github.com/cosmos/cosmos-sdk/pull/21769) Fix baseapp options ordering to avoid overwriting options set by modules. +* (x/consensus) [#21493](https://github.com/cosmos/cosmos-sdk/pull/21493) Fix regression that prevented to upgrade to > v0.50.7 without consensus version params. +* (baseapp) [#21256](https://github.com/cosmos/cosmos-sdk/pull/21256) Halt height will not commit the block indicated, meaning that if halt-height is set to 10, only blocks until 9 (included) will be committed. This is to go back to the original behavior before a change was introduced in v0.50.0. +* (baseapp) [#21444](https://github.com/cosmos/cosmos-sdk/pull/21444) Follow-up, Return PreBlocker events in FinalizeBlockResponse. +* (baseapp) [#21413](https://github.com/cosmos/cosmos-sdk/pull/21413) Fix data race in sdk mempool. + +## [v0.50.9](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.9) - 2024-08-07 + +## Bug Fixes + +* (baseapp) [#21159](https://github.com/cosmos/cosmos-sdk/pull/21159) Return PreBlocker events in FinalizeBlockResponse. +* [#20939](https://github.com/cosmos/cosmos-sdk/pull/20939) Fix collection reverse iterator to include `pagination.key` in the result. +* (client/grpc) [#20969](https://github.com/cosmos/cosmos-sdk/pull/20969) Fix `node.NewQueryServer` method not setting `cfg`. +* (testutil/integration) [#21006](https://github.com/cosmos/cosmos-sdk/pull/21006) Fix `NewIntegrationApp` method not writing default genesis to state. +* (runtime) [#21080](https://github.com/cosmos/cosmos-sdk/pull/21080) Fix `app.yaml` / `app.json` incompatibility with `depinject v1.0.0`. 
+ +## [v0.50.8](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.8) - 2024-07-15 + +## Features + +* (client) [#20690](https://github.com/cosmos/cosmos-sdk/pull/20690) Import mnemonic from file + +## Improvements + +* (x/authz,x/feegrant) [#20590](https://github.com/cosmos/cosmos-sdk/pull/20590) Provide updated keeper in depinject for authz and feegrant modules. +* [#20631](https://github.com/cosmos/cosmos-sdk/pull/20631) Fix json parsing in the wait-tx command. +* (x/auth) [#20438](https://github.com/cosmos/cosmos-sdk/pull/20438) Add `--skip-signature-verification` flag to multisign command to allow nested multisigs. + +## Bug Fixes + +* (simulation) [#17911](https://github.com/cosmos/cosmos-sdk/pull/17911) Fix all problems with executing command `make test-sim-custom-genesis-fast` for simulation test. +* (simulation) [#18196](https://github.com/cosmos/cosmos-sdk/pull/18196) Fix the problem of `validator set is empty after InitGenesis` in simulation test. + +## [v0.50.7](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.7) - 2024-06-04 + +### Improvements + +* (debug) [#20328](https://github.com/cosmos/cosmos-sdk/pull/20328) Add consensus address for debug cmd. +* (runtime) [#20264](https://github.com/cosmos/cosmos-sdk/pull/20264) Expose grpc query router via depinject. +* (x/consensus) [#20381](https://github.com/cosmos/cosmos-sdk/pull/20381) Use Comet utility for consensus module consensus param updates. +* (client) [#20356](https://github.com/cosmos/cosmos-sdk/pull/20356) Overwrite client context when available in `SetCmdClientContext`. + +### Bug Fixes + +* (baseapp) [#20346](https://github.com/cosmos/cosmos-sdk/pull/20346) Correctly assign `execModeSimulate` to context for `simulateTx`. +* (baseapp) [#20144](https://github.com/cosmos/cosmos-sdk/pull/20144) Remove txs from mempool when AnteHandler fails in recheck. +* (baseapp) [#20107](https://github.com/cosmos/cosmos-sdk/pull/20107) Avoid header height overwrite block height. 
+* (cli) [#20020](https://github.com/cosmos/cosmos-sdk/pull/20020) Make bootstrap-state command support both new and legacy genesis format. +* (testutil/sims) [#20151](https://github.com/cosmos/cosmos-sdk/pull/20151) Set all signatures and don't overwrite the previous one in `GenSignedMockTx`. + +## [v0.50.6](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.6) - 2024-04-22 + +### Features + +* (types) [#19759](https://github.com/cosmos/cosmos-sdk/pull/19759) Align SignerExtractionAdapter in PriorityNonceMempool Remove. +* (client) [#19870](https://github.com/cosmos/cosmos-sdk/pull/19870) Add new query command `wait-tx`. Alias `event-query-tx-for` to `wait-tx` for backward compatibility. + +### Improvements + +* (telemetry) [#19903](https://github.com/cosmos/cosmos-sdk/pull/19903) Conditionally emit metrics based on enablement. + * **Introduction of `Now` Function**: Added a new function called `Now` to the telemetry package. It returns the current system time if telemetry is enabled, or a zero time if telemetry is not enabled. + * **Atomic Global Variable**: Implemented an atomic global variable to manage the state of telemetry's enablement. This ensures thread safety for the telemetry state. + * **Conditional Telemetry Emission**: All telemetry functions have been updated to emit metrics only when telemetry is enabled. They perform a check with `isTelemetryEnabled()` and return early if telemetry is disabled, minimizing unnecessary operations and overhead. +* (deps) [#19810](https://github.com/cosmos/cosmos-sdk/pull/19810) Upgrade prometheus version and fix API breaking change due to prometheus bump. +* (deps) [#19810](https://github.com/cosmos/cosmos-sdk/pull/19810) Bump `cosmossdk.io/store` to v1.1.0. +* (server) [#19884](https://github.com/cosmos/cosmos-sdk/pull/19884) Add start customizability to start command options. +* (x/gov) [#19853](https://github.com/cosmos/cosmos-sdk/pull/19853) Emit `depositor` in `EventTypeProposalDeposit`. 
+* (x/gov) [#19844](https://github.com/cosmos/cosmos-sdk/pull/19844) Emit the proposer of governance proposals. +* (baseapp) [#19616](https://github.com/cosmos/cosmos-sdk/pull/19616) Don't share gas meter in tx execution. + +## Bug Fixes + +* (x/authz) [#20114](https://github.com/cosmos/cosmos-sdk/pull/20114) Follow up of [GHSA-4j93-fm92-rp4m](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-4j93-fm92-rp4m) for `x/authz`. +* (crypto) [#19691](https://github.com/cosmos/cosmos-sdk/pull/19745) Fix tx sign doesn't throw an error when incorrect Ledger is used. +* (baseapp) [#19970](https://github.com/cosmos/cosmos-sdk/pull/19970) Fix default config values to use no-op mempool as default. +* (crypto) [#20027](https://github.com/cosmos/cosmos-sdk/pull/20027) secp256r1 keys now implement gogoproto's customtype interface. +* (x/bank) [#20028](https://github.com/cosmos/cosmos-sdk/pull/20028) Align query with multi denoms for send-enabled. + +## [v0.50.5](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.5) - 2024-03-12 + +### Features + +* (baseapp) [#19626](https://github.com/cosmos/cosmos-sdk/pull/19626) Add `DisableBlockGasMeter` option to `BaseApp`, which removes the block gas meter during transaction execution. + +### Improvements + +* (x/distribution) [#19707](https://github.com/cosmos/cosmos-sdk/pull/19707) Add autocli config for `DelegationTotalRewards` for CLI consistency with `q rewards` commands in previous versions. +* (x/auth) [#19651](https://github.com/cosmos/cosmos-sdk/pull/19651) Allow empty public keys in `GetSignBytesAdapter`. + +### Bug Fixes + +* (x/gov) [#19725](https://github.com/cosmos/cosmos-sdk/pull/19725) Fetch a failed proposal tally from proposal.FinalTallyResult in the gprc query. +* (types) [#19709](https://github.com/cosmos/cosmos-sdk/pull/19709) Fix skip staking genesis export when using `CoreAppModuleAdaptor` / `CoreAppModuleBasicAdaptor` for it. 
+* (x/auth) [#19549](https://github.com/cosmos/cosmos-sdk/pull/19549) Accept custom get signers when injecting `x/auth/tx`. +* (x/staking) Fix a possible bypass of delegator slashing: [GHSA-86h5-xcpx-cfqc](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-86h5-xcpx-cfqc) +* (baseapp) Fix a bug in `baseapp.ValidateVoteExtensions` helper ([GHSA-95rx-m9m5-m94v](https://github.com/cosmos/cosmos-sdk/security/advisories/GHSA-95rx-m9m5-m94v)). The helper has been fixed and for avoiding API breaking changes `currentHeight` and `chainID` arguments are ignored. Those arguments are removed from the helper in v0.51+. + +## [v0.50.4](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.4) - 2024-02-19 + +### Features + +* (server) [#19280](https://github.com/cosmos/cosmos-sdk/pull/19280) Adds in-place testnet CLI command. + +### Improvements + +* (client) [#19393](https://github.com/cosmos/cosmos-sdk/pull/19393/) Add `ReadDefaultValuesFromDefaultClientConfig` to populate the default values from the default client config in client.Context without creating an app folder. + +### Bug Fixes + +* (x/auth/vesting) [GHSA-4j93-fm92-rp4m](#bug-fixes) Add `BlockedAddr` check in `CreatePeriodicVestingAccount`. +* (baseapp) [#19338](https://github.com/cosmos/cosmos-sdk/pull/19338) Set HeaderInfo in context when calling `setState`. +* (baseapp) [#19200](https://github.com/cosmos/cosmos-sdk/pull/19200) Ensure that sdk side ve math matches cometbft. +* [#19106](https://github.com/cosmos/cosmos-sdk/pull/19106) Allow empty public keys when setting signatures. Public keys aren't needed for every transaction. +* (baseapp) [#19198](https://github.com/cosmos/cosmos-sdk/pull/19198) Remove usage of pointers in logs in all optimistic execution goroutines. +* (baseapp) [#19177](https://github.com/cosmos/cosmos-sdk/pull/19177) Fix baseapp `DefaultProposalHandler` same-sender non-sequential sequence. 
+* (crypto) [#19371](https://github.com/cosmos/cosmos-sdk/pull/19371) Avoid CLI redundant log in stdout, log to stderr instead. + +## [v0.50.3](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.3) - 2024-01-15 + +### Features + +* (types) [#18991](https://github.com/cosmos/cosmos-sdk/pull/18991) Add SignerExtractionAdapter to PriorityNonceMempool/Config and provide Default implementation matching existing behavior. +* (gRPC) [#19043](https://github.com/cosmos/cosmos-sdk/pull/19043) Add `halt_height` to the gRPC `/cosmos/base/node/v1beta1/config` request. + +### Improvements + +* (x/bank) [#18956](https://github.com/cosmos/cosmos-sdk/pull/18956) Introduced a new `DenomOwnersByQuery` query method for `DenomOwners`, which accepts the denom value as a query string parameter, resolving issues with denoms containing slashes. +* (x/gov) [#18707](https://github.com/cosmos/cosmos-sdk/pull/18707) Improve genesis validation. +* (x/auth/tx) [#18772](https://github.com/cosmos/cosmos-sdk/pull/18772) Remove misleading gas wanted from tx simulation failure log. +* (client/tx) [#18852](https://github.com/cosmos/cosmos-sdk/pull/18852) Add `WithFromName` to tx factory. +* (types) [#18888](https://github.com/cosmos/cosmos-sdk/pull/18888) Speedup DecCoin.Sort() if len(coins) <= 1 +* (types) [#18875](https://github.com/cosmos/cosmos-sdk/pull/18875) Speedup coins.Sort() if len(coins) <= 1 +* (baseapp) [#18915](https://github.com/cosmos/cosmos-sdk/pull/18915) Add a new `ExecModeVerifyVoteExtension` exec mode and ensure it's populated in the `Context` during `VerifyVoteExtension` execution. +* (testutil) [#18930](https://github.com/cosmos/cosmos-sdk/pull/18930) Add NodeURI for clientCtx. + +### Bug Fixes + +* (baseapp) [#19058](https://github.com/cosmos/cosmos-sdk/pull/19058) Fix baseapp posthandler branch would fail if the `runMsgs` had returned an error. 
+* (baseapp) [#18609](https://github.com/cosmos/cosmos-sdk/issues/18609) Fixed accounting in the block gas meter after module's beginBlock and before DeliverTx, ensuring transaction processing always starts with the expected zeroed out block gas meter. +* (baseapp) [#18895](https://github.com/cosmos/cosmos-sdk/pull/18895) Fix de-duplicating vote extensions during validation in ValidateVoteExtensions. + +## [v0.50.2](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.2) - 2023-12-11 + +### Features + +* (debug) [#18219](https://github.com/cosmos/cosmos-sdk/pull/18219) Add debug commands for application codec types. +* (client/keys) [#17639](https://github.com/cosmos/cosmos-sdk/pull/17639) Allows using and saving public keys encoded as base64. +* (server) [#17094](https://github.com/cosmos/cosmos-sdk/pull/17094) Add a `shutdown-grace` flag for waiting a given time before exit. + +### Improvements + +* (telemetry) [#18646](https://github.com/cosmos/cosmos-sdk/pull/18646) Enable statsd and dogstatsd telemetry sinks. +* (server) [#18478](https://github.com/cosmos/cosmos-sdk/pull/18478) Add command flag to disable colored logs. +* (x/gov) [#18025](https://github.com/cosmos/cosmos-sdk/pull/18025) Improve ` q gov proposer` by querying directly a proposal instead of tx events. It is an alias of `q gov proposal` as the proposer is a field of the proposal. +* (version) [#18063](https://github.com/cosmos/cosmos-sdk/pull/18063) Allow to define extra info to be displayed in ` version --long` command. +* (codec/unknownproto) [#18541](https://github.com/cosmos/cosmos-sdk/pull/18541) Remove the use of "protoc-gen-gogo/descriptor" in favour of using the official protobuf descriptorpb types inside unknownproto. + +### Bug Fixes + +* (x/auth) [#18564](https://github.com/cosmos/cosmos-sdk/pull/18564) Fix total fees calculation when batch signing. +* (server) [#18537](https://github.com/cosmos/cosmos-sdk/pull/18537) Fix panic when defining minimum gas config as `100stake;100uatom`. 
Use a `,` delimiter instead of `;`. Fixes the server config getter to use the correct delimiter. +* [#18531](https://github.com/cosmos/cosmos-sdk/pull/18531) Baseapp's `GetConsensusParams` returns an empty struct instead of panicking if no params are found. +* (client/tx) [#18472](https://github.com/cosmos/cosmos-sdk/pull/18472) Utilizes the correct Pubkey when simulating a transaction. +* (baseapp) [#18486](https://github.com/cosmos/cosmos-sdk/pull/18486) Fixed FinalizeBlock calls not being passed to ABCIListeners. +* (baseapp) [#18627](https://github.com/cosmos/cosmos-sdk/pull/18627) Post handlers are run on non successful transaction executions too. +* (baseapp) [#18654](https://github.com/cosmos/cosmos-sdk/pull/18654) Fixes an issue in which `gogoproto.Merge` does not work with gogoproto messages with custom types. + +## [v0.50.1](https://github.com/cosmos/cosmos-sdk/releases/tag/v0.50.1) - 2023-11-07 + +> v0.50.0 has been retracted due to a mistake in tagging the release. Please use v0.50.1 instead. + +### Features + +* (baseapp) [#18071](https://github.com/cosmos/cosmos-sdk/pull/18071) Add hybrid handlers to `MsgServiceRouter`. +* (server) [#18162](https://github.com/cosmos/cosmos-sdk/pull/18162) Start gRPC & API server in standalone mode. +* (baseapp & types) [#17712](https://github.com/cosmos/cosmos-sdk/pull/17712) Introduce `PreBlock`, which runs before begin blocker other modules, and allows to modify consensus parameters, and the changes are visible to the following state machine logics. Additionally it can be used for vote extensions. +* (genutil) [#17571](https://github.com/cosmos/cosmos-sdk/pull/17571) Allow creation of `AppGenesis` without a file lookup. +* (codec) [#17042](https://github.com/cosmos/cosmos-sdk/pull/17042) Add `CollValueV2` which supports encoding of protov2 messages in collections. 
+* (x/gov) [#16976](https://github.com/cosmos/cosmos-sdk/pull/16976) Add `failed_reason` field to `Proposal` under `x/gov` to indicate the reason for a failed proposal. Referenced from [#238](https://github.com/bnb-chain/greenfield-cosmos-sdk/pull/238) under `bnb-chain/greenfield-cosmos-sdk`. +* (baseapp) [#16898](https://github.com/cosmos/cosmos-sdk/pull/16898) Add `preFinalizeBlockHook` to allow vote extensions persistence. +* (cli) [#16887](https://github.com/cosmos/cosmos-sdk/pull/16887) Add two new CLI commands: ` tx simulate` for simulating a transaction; ` query block-results` for querying CometBFT RPC for block results. +* (x/bank) [#16852](https://github.com/cosmos/cosmos-sdk/pull/16852) Add `DenomMetadataByQueryString` query in bank module to support metadata query by query string. +* (baseapp) [#16581](https://github.com/cosmos/cosmos-sdk/pull/16581) Implement Optimistic Execution as an experimental feature (not enabled by default). +* (types) [#16257](https://github.com/cosmos/cosmos-sdk/pull/16257) Allow setting the base denom in the denom registry. +* (baseapp) [#16239](https://github.com/cosmos/cosmos-sdk/pull/16239) Add Gas Limits to allow node operators to resource bound queries. +* (cli) [#16209](https://github.com/cosmos/cosmos-sdk/pull/16209) Make `StartCmd` more customizable. +* (types/simulation) [#16074](https://github.com/cosmos/cosmos-sdk/pull/16074) Add generic SimulationStoreDecoder for modules using collections. +* (genutil) [#16046](https://github.com/cosmos/cosmos-sdk/pull/16046) Add "module-name" flag to genutil `add-genesis-account` to enable initializing module accounts at genesis. +* [#15970](https://github.com/cosmos/cosmos-sdk/pull/15970) Enable SIGN_MODE_TEXTUAL. +* (types) [#15958](https://github.com/cosmos/cosmos-sdk/pull/15958) Add `module.NewBasicManagerFromManager` for creating a basic module manager from a module manager. 
+* (types/module) [#15829](https://github.com/cosmos/cosmos-sdk/pull/15829) Add new endblocker interface to handle valset updates. +* (runtime) [#15818](https://github.com/cosmos/cosmos-sdk/pull/15818) Provide logger through `depinject` instead of appBuilder. +* (types) [#15735](https://github.com/cosmos/cosmos-sdk/pull/15735) Make `ValidateBasic() error` method of `Msg` interface optional. Modules should validate messages directly in their message handlers ([RFC 001](https://docs.cosmos.network/main/rfc/rfc-001-tx-validation)). +* (x/genutil) [#15679](https://github.com/cosmos/cosmos-sdk/pull/15679) Allow applications to specify a custom genesis migration function for the `genesis migrate` command. +* (telemetry) [#15657](https://github.com/cosmos/cosmos-sdk/pull/15657) Emit more data (go version, sdk version, upgrade height) in prom metrics. +* (client) [#15597](https://github.com/cosmos/cosmos-sdk/pull/15597) Add status endpoint for clients. +* (testutil/integration) [#15556](https://github.com/cosmos/cosmos-sdk/pull/15556) Introduce `testutil/integration` package for module integration testing. +* (runtime) [#15547](https://github.com/cosmos/cosmos-sdk/pull/15547) Allow runtime to pass event core api service to modules. +* (client) [#15458](https://github.com/cosmos/cosmos-sdk/pull/15458) Add a `CmdContext` field to client.Context initialized to cobra command's context. +* (x/genutil) [#15301](https://github.com/cosmos/cosmos-sdk/pull/15031) Add application genesis. The genesis is now entirely managed by the application and passed to CometBFT at node instantiation. Functions that were taking a `cmttypes.GenesisDoc{}` now take a `genutiltypes.AppGenesis{}`. +* (core) [#15133](https://github.com/cosmos/cosmos-sdk/pull/15133) Implement RegisterServices in the module manager. +* (x/bank) [#14894](https://github.com/cosmos/cosmos-sdk/pull/14894) Return a human readable denomination for IBC vouchers when querying bank balances. 
Added a `ResolveDenom` parameter to `types.QueryAllBalancesRequest` and `--resolve-denom` flag to `GetBalancesCmd()`. +* (core) [#14860](https://github.com/cosmos/cosmos-sdk/pull/14860) Add `Precommit` and `PrepareCheckState` AppModule callbacks. +* (x/gov) [#14720](https://github.com/cosmos/cosmos-sdk/pull/14720) Upstream expedited proposals from Osmosis. +* (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) Added ability to query blocks by events with queries directly passed to Tendermint, which will allow for full query operator support, e.g. `>`. +* (x/auth) [#14650](https://github.com/cosmos/cosmos-sdk/pull/14650) Add Textual SignModeHandler. Enable `SIGN_MODE_TEXTUAL` by following the [UPGRADING.md](./UPGRADING.md) instructions. +* (x/crisis) [#14588](https://github.com/cosmos/cosmos-sdk/pull/14588) Use CacheContext() in AssertInvariants(). +* (mempool) [#14484](https://github.com/cosmos/cosmos-sdk/pull/14484) Add priority nonce mempool option for transaction replacement. +* (query) [#14468](https://github.com/cosmos/cosmos-sdk/pull/14468) Implement pagination for collections. +* (x/gov) [#14373](https://github.com/cosmos/cosmos-sdk/pull/14373) Add new proto field `constitution` of type `string` to gov module genesis state, which allows chain builders to lay a strong foundation by specifying purpose. +* (client) [#14342](https://github.com/cosmos/cosmos-sdk/pull/14342) Add ` config` command is now a sub-command, for setting, getting and migrating Cosmos SDK configuration files. +* (x/distribution) [#14322](https://github.com/cosmos/cosmos-sdk/pull/14322) Introduce a new gRPC message handler, `DepositValidatorRewardsPool`, that allows explicit funding of a validator's reward pool. +* (x/bank) [#14224](https://github.com/cosmos/cosmos-sdk/pull/14224) Allow injection of restrictions on transfers using `AppendSendRestriction` or `PrependSendRestriction`. 
+ +### Improvements + +* (x/gov) [#18189](https://github.com/cosmos/cosmos-sdk/pull/18189) Limit the accepted deposit coins for a proposal to the minimum proposal deposit denoms. +* (x/staking) [#18049](https://github.com/cosmos/cosmos-sdk/pull/18049) Return early if Slash encounters zero tokens to burn. +* (x/staking) [#18035](https://github.com/cosmos/cosmos-sdk/pull/18035) Hoisted out of the redelegation loop, the non-changing validator and delegator addresses parsing. +* (keyring) [#17913](https://github.com/cosmos/cosmos-sdk/pull/17913) Add `NewAutoCLIKeyring` for creating an AutoCLI keyring from a SDK keyring. +* (x/consensus) [#18041](https://github.com/cosmos/cosmos-sdk/pull/18041) Let `ToProtoConsensusParams()` return an error. +* (x/gov) [#17780](https://github.com/cosmos/cosmos-sdk/pull/17780) Recover panics and turn them into errors when executing x/gov proposals. +* (baseapp) [#17667](https://github.com/cosmos/cosmos-sdk/pull/17667) Close databases opened by SDK in `baseApp.Close()`. +* (types/module) [#17554](https://github.com/cosmos/cosmos-sdk/pull/17554) Introduce `HasABCIGenesis` which is implemented by a module only when a validatorset update needs to be returned. +* (cli) [#17389](https://github.com/cosmos/cosmos-sdk/pull/17389) gRPC CometBFT commands have been added under ` q consensus comet`. CometBFT commands placement in the SDK has been simplified. See the exhaustive list below. + * `client/rpc.StatusCommand()` is now at `server.StatusCommand()` +* (testutil) [#17216](https://github.com/cosmos/cosmos-sdk/issues/17216) Add `DefaultContextWithKeys` to `testutil` package. +* (cli) [#17187](https://github.com/cosmos/cosmos-sdk/pull/17187) Do not use `ctx.PrintObjectLegacy` in commands anymore. + * ` q gov proposer [proposal-id]` now returns a proposal id as int instead of string. +* (x/staking) [#17164](https://github.com/cosmos/cosmos-sdk/pull/17164) Add `BondedTokensAndPubKeyByConsAddr` to the keeper to enable vote extension verification. 
+* (x/group, x/gov) [#17109](https://github.com/cosmos/cosmos-sdk/pull/17109) Let proposal summary be 40x longer than metadata limit. +* (version) [#17096](https://github.com/cosmos/cosmos-sdk/pull/17096) Improve `getSDKVersion()` to handle module replacements. +* (types) [#16890](https://github.com/cosmos/cosmos-sdk/pull/16890) Remove `GetTxCmd() *cobra.Command` and `GetQueryCmd() *cobra.Command` from `module.AppModuleBasic` interface. +* (x/authz) [#16869](https://github.com/cosmos/cosmos-sdk/pull/16869) Improve error message when grant not found. +* (all) [#16497](https://github.com/cosmos/cosmos-sdk/pull/16497) Removed all exported vestiges of `sdk.MustSortJSON` and `sdk.SortJSON`. +* (server) [#16238](https://github.com/cosmos/cosmos-sdk/pull/16238) Don't setup p2p node keys if starting a node in GRPC only mode. +* (cli) [#16206](https://github.com/cosmos/cosmos-sdk/pull/16206) Make ABCI handshake profileable. +* (types) [#16076](https://github.com/cosmos/cosmos-sdk/pull/16076) Optimize `ChainAnteDecorators`/`ChainPostDecorators` to instantiate the functions once instead of on every invocation of the returned `AnteHandler`/`PostHandler`. +* (server) [#16071](https://github.com/cosmos/cosmos-sdk/pull/16071) When `mempool.max-txs` is set to a negative value, use a no-op mempool (effectively disable the app mempool). +* (types/query) [#16041](https://github.com/cosmos/cosmos-sdk/pull/16041) Change pagination max limit to a variable in order to be modified by application devs. +* (simapp) [#15958](https://github.com/cosmos/cosmos-sdk/pull/15958) Refactor SimApp for removing the global basic manager. +* (all modules) [#15901](https://github.com/cosmos/cosmos-sdk/issues/15901) All core Cosmos SDK modules query commands have migrated to [AutoCLI](https://docs.cosmos.network/main/core/autocli), ensuring parity between gRPC and CLI queries. +* (x/auth) [#15867](https://github.com/cosmos/cosmos-sdk/pull/15867) Support better logging for signature verification failure. 
+* (store/cachekv) [#15767](https://github.com/cosmos/cosmos-sdk/pull/15767) Reduce peak RAM usage during and after `InitGenesis`. +* (x/bank) [#15764](https://github.com/cosmos/cosmos-sdk/pull/15764) Speedup x/bank `InitGenesis`. +* (x/slashing) [#15580](https://github.com/cosmos/cosmos-sdk/pull/15580) Refactor the validator's missed block signing window to be a chunked bitmap instead of a "logical" bitmap, significantly reducing the storage footprint. +* (x/gov) [#15554](https://github.com/cosmos/cosmos-sdk/pull/15554) Add proposal result log in `active_proposal` event. When a proposal passes but fails to execute, the proposal result is logged in the `active_proposal` event. +* (x/consensus) [#15553](https://github.com/cosmos/cosmos-sdk/pull/15553) Migrate consensus module to use collections. +* (server) [#15358](https://github.com/cosmos/cosmos-sdk/pull/15358) Add `server.InterceptConfigsAndCreateContext` as alternative to `server.InterceptConfigsPreRunHandler` which does not set the server context and the default SDK logger. +* (mempool) [#15328](https://github.com/cosmos/cosmos-sdk/pull/15328) Improve the `PriorityNonceMempool`: + * Support generic transaction prioritization, instead of `ctx.Priority()` + * Improve construction through the use of a single `PriorityNonceMempoolConfig` instead of option functions +* (x/authz) [#15164](https://github.com/cosmos/cosmos-sdk/pull/15164) Add `MsgCancelUnbondingDelegation` to staking authorization. +* (server) [#15041](https://github.com/cosmos/cosmos-sdk/pull/15041) Remove unnecessary sleeps from gRPC and API server initiation. The servers will start and accept requests as soon as they're ready. +* (baseapp) [#15023](https://github.com/cosmos/cosmos-sdk/pull/15023) & [#15213](https://github.com/cosmos/cosmos-sdk/pull/15213) Add `MessageRouter` interface to baseapp and pass it to authz, gov and groups instead of concrete type. 
+* [#15011](https://github.com/cosmos/cosmos-sdk/pull/15011) Introduce `cosmossdk.io/log` package to provide a consistent logging interface through the SDK. CometBFT logger is now replaced by `cosmossdk.io/log.Logger`. +* (x/staking) [#14864](https://github.com/cosmos/cosmos-sdk/pull/14864) ` tx staking create-validator` CLI command now takes a json file as an arg instead of using required flags. +* (x/auth) [#14758](https://github.com/cosmos/cosmos-sdk/pull/14758) Allow transaction event queries to directly passed to Tendermint, which will allow for full query operator support, e.g. `>`. +* (x/evidence) [#14757](https://github.com/cosmos/cosmos-sdk/pull/14757) Evidence messages do not need to implement a `.Type()` anymore. +* (x/auth/tx) [#14751](https://github.com/cosmos/cosmos-sdk/pull/14751) Remove `.Type()` and `Route()` methods from all msgs and `legacytx.LegacyMsg` interface. +* (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) Added ability to query blocks by either height/hash ` q block --type=height|hash `. +* (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) Return undelegate amount in MsgUndelegateResponse. +* [#14529](https://github.com/cosmos/cosmos-sdk/pull/14529) Add new property `BondDenom` to `SimulationState` struct. +* (store) [#14439](https://github.com/cosmos/cosmos-sdk/pull/14439) Remove global metric gatherer from store. + * By default store has a no op metric gatherer, the application developer must set another metric gatherer or us the provided one in `store/metrics`. +* (store) [#14438](https://github.com/cosmos/cosmos-sdk/pull/14438) Pass logger from baseapp to store. +* (baseapp) [#14417](https://github.com/cosmos/cosmos-sdk/pull/14417) The store package no longer has a dependency on baseapp. +* (module) [#14415](https://github.com/cosmos/cosmos-sdk/pull/14415) Loosen assertions in SetOrderBeginBlockers() and SetOrderEndBlockers(). 
+* (store) [#14410](https://github.com/cosmos/cosmos-sdk/pull/14410) `rootmulti.Store.loadVersion` has validation to check if all the module stores' height is correct, it will error if any module store has incorrect height. +* [#14406](https://github.com/cosmos/cosmos-sdk/issues/14406) Migrate usage of `types/store.go` to `store/types/..`. +* (context)[#14384](https://github.com/cosmos/cosmos-sdk/pull/14384) Refactor(context): Pass EventManager to the context as an interface. +* (types) [#14354](https://github.com/cosmos/cosmos-sdk/pull/14354) Improve performance on Context.KVStore and Context.TransientStore by 40%. +* (crypto/keyring) [#14151](https://github.com/cosmos/cosmos-sdk/pull/14151) Move keys presentation from `crypto/keyring` to `client/keys` +* (signing) [#14087](https://github.com/cosmos/cosmos-sdk/pull/14087) Add SignModeHandlerWithContext interface with a new `GetSignBytesWithContext` to get the sign bytes using `context.Context` as an argument to access state. +* (server) [#14062](https://github.com/cosmos/cosmos-sdk/pull/14062) Remove rosetta from server start. +* (crypto) [#3129](https://github.com/cosmos/cosmos-sdk/pull/3129) New armor and keyring key derivation uses aead and encryption uses chacha20poly. + +### State Machine Breaking + +* (x/gov) [#18146](https://github.com/cosmos/cosmos-sdk/pull/18146) Add denom check to reject denoms outside of those listed in `MinDeposit`. A new `MinDepositRatio` param is added (with a default value of `0.001`) and now deposits are required to be at least `MinDepositRatio*MinDeposit` to be accepted. +* (x/group,x/gov) [#16235](https://github.com/cosmos/cosmos-sdk/pull/16235) A group and gov proposal is rejected if the proposal metadata title and summary do not match the proposal title and summary. +* (baseapp) [#15930](https://github.com/cosmos/cosmos-sdk/pull/15930) change vote info provided by prepare and process proposal to the one in the block. 
+* (x/staking) [#15731](https://github.com/cosmos/cosmos-sdk/pull/15731) Introducing a new index to retrieve the delegations by validator efficiently. +* (x/staking) [#15701](https://github.com/cosmos/cosmos-sdk/pull/15701) The `HistoricalInfoKey` has been updated to use a binary format. +* (x/slashing) [#15580](https://github.com/cosmos/cosmos-sdk/pull/15580) The validator slashing window now stores "chunked" bitmap entries for each validator's signing window instead of a single boolean entry per signing window index. +* (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) `MsgUndelegateResponse` now includes undelegated amount. `x/staking` module's `keeper.Undelegate` now returns 3 values (completionTime,undelegateAmount,error) instead of 2. +* (x/feegrant) [#14294](https://github.com/cosmos/cosmos-sdk/pull/14294) Moved the logic of rejecting duplicate grant from `msg_server` to `keeper` method. + +### API Breaking Changes + +* (x/auth) [#17787](https://github.com/cosmos/cosmos-sdk/pull/17787) Remove Tip functionality. +* (types) `module.EndBlockAppModule` has been replaced by Core API `appmodule.HasEndBlocker` or `module.HasABCIEndBlock` when needing validator updates. +* (types) `module.BeginBlockAppModule` has been replaced by Core API `appmodule.HasBeginBlocker`. +* (types) [#17358](https://github.com/cosmos/cosmos-sdk/pull/17358) Remove deprecated `sdk.Handler`, use `baseapp.MsgServiceHandler` instead. +* (client) [#17197](https://github.com/cosmos/cosmos-sdk/pull/17197) `keys.Commands` does not take a home directory anymore. It is inferred from the root command. +* (x/staking) [#17157](https://github.com/cosmos/cosmos-sdk/pull/17157) `GetValidatorsByPowerIndexKey` and `ValidateBasic` for historical info takes a validator address codec in order to be able to decode/encode addresses. 
+ * `GetOperator()` now returns the address as it is represented in state, by default this is an encoded address + * `GetConsAddr() ([]byte, error)` returns `[]byte` instead of sdk.ConsAddres. + * `FromABCIEvidence` & `GetConsensusAddress(consAc address.Codec)` now take a consensus address codec to be able to decode the incoming address. + * (x/distribution) `Delegate` & `SlashValidator` helper function added the mock staking keeper as a parameter passed to the function +* (x/staking) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgCreateValidator`, `NewValidator`, `NewMsgCancelUnbondingDelegation`, `NewMsgUndelegate`, `NewMsgBeginRedelegate`, `NewMsgDelegate` and `NewMsgEditValidator` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`: + * `NewRedelegation` and `NewUnbondingDelegation` takes a validatorAddressCodec and a delegatorAddressCodec in order to decode the addresses. + * `NewRedelegationResponse` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`. + * `NewMsgCreateValidator.Validate()` takes an address codec in order to decode the address. + * `BuildCreateValidatorMsg` takes a ValidatorAddressCodec in order to decode addresses. +* (x/slashing) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgUnjail` takes a string instead of `sdk.ValAddress` +* (x/genutil) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `GenAppStateFromConfig`, AddGenesisAccountCmd and `GenTxCmd` takes an addresscodec to decode addresses. +* (x/distribution) [#17098](https://github.com/cosmos/cosmos-sdk/pull/17098) `NewMsgDepositValidatorRewardsPool`, `NewMsgFundCommunityPool`, `NewMsgWithdrawValidatorCommission` and `NewMsgWithdrawDelegatorReward` takes a string instead of `sdk.ValAddress` or `sdk.AccAddress`. +* (x/staking) [#16959](https://github.com/cosmos/cosmos-sdk/pull/16959) Add validator and consensus address codec as staking keeper arguments. 
+* (x/staking) [#16958](https://github.com/cosmos/cosmos-sdk/pull/16958) DelegationI interface `GetDelegatorAddr` & `GetValidatorAddr` have been migrated to return string instead of sdk.AccAddress and sdk.ValAddress respectively. stakingtypes.NewDelegation takes a string instead of sdk.AccAddress and sdk.ValAddress.
+* (testutil) [#16899](https://github.com/cosmos/cosmos-sdk/pull/16899) The *cli testutil* `QueryBalancesExec` has been removed. Use the gRPC or REST query instead.
+* (x/staking) [#16795](https://github.com/cosmos/cosmos-sdk/pull/16795) `DelegationToDelegationResponse`, `DelegationsToDelegationResponses`, `RedelegationsToRedelegationResponses` are no longer exported.
+* (x/auth/vesting) [#16741](https://github.com/cosmos/cosmos-sdk/pull/16741) Vesting account constructors now return an error with the result of their validate function.
+* (x/auth) [#16650](https://github.com/cosmos/cosmos-sdk/pull/16650) The *cli testutil* `QueryAccountExec` has been removed. Use the gRPC or REST query instead.
+* (x/auth) [#16621](https://github.com/cosmos/cosmos-sdk/pull/16621) Pass address codec to auth new keeper constructor.
+* (x/auth) [#16423](https://github.com/cosmos/cosmos-sdk/pull/16423) `helpers.AddGenesisAccount` has been moved to `x/genutil` to remove the cyclic dependency between `x/auth` and `x/genutil`.
+* (baseapp) [#16342](https://github.com/cosmos/cosmos-sdk/pull/16342) NewContext was renamed to NewContextLegacy. The replacement (NewContext) now does not take a header, instead you should set the header via `WithHeaderInfo` or `WithBlockHeight`. Note that `WithBlockHeight` will soon be deprecated and it is recommended to use `WithHeaderInfo`.
+* (x/mint) [#16329](https://github.com/cosmos/cosmos-sdk/pull/16329) Use collections for state management:
+ * Removed: keeper `GetParams`, `SetParams`, `GetMinter`, `SetMinter`. 
+* (x/crisis) [#16328](https://github.com/cosmos/cosmos-sdk/pull/16328) Use collections for state management: + * Removed: keeper `GetConstantFee`, `SetConstantFee` +* (x/staking) [#16324](https://github.com/cosmos/cosmos-sdk/pull/16324) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. Notable changes: + * `Validator` method now returns `types.ErrNoValidatorFound` instead of `nil` when not found. +* (x/distribution) [#16302](https://github.com/cosmos/cosmos-sdk/pull/16302) Use collections for FeePool state management. + * Removed: keeper `GetFeePool`, `SetFeePool`, `GetFeePoolCommunityCoins` +* (types) [#16272](https://github.com/cosmos/cosmos-sdk/pull/16272) `FeeGranter` in the `FeeTx` interface returns `[]byte` instead of `string`. +* (x/gov) [#16268](https://github.com/cosmos/cosmos-sdk/pull/16268) Use collections for proposal state management (part 2): + * this finalizes the gov collections migration + * Removed: types all the key related functions + * Removed: keeper `InsertActiveProposalsQueue`, `RemoveActiveProposalsQueue`, `InsertInactiveProposalsQueue`, `RemoveInactiveProposalsQueue`, `IterateInactiveProposalsQueue`, `IterateActiveProposalsQueue`, `ActiveProposalsQueueIterator`, `InactiveProposalsQueueIterator` +* (x/slashing) [#16246](https://github.com/cosmos/cosmos-sdk/issues/16246) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. `GetValidatorSigningInfo` now returns an error instead of a `found bool`, the error can be `nil` (found), `ErrNoSigningInfoFound` (not found) and any other error. +* (module) [#16227](https://github.com/cosmos/cosmos-sdk/issues/16227) `manager.RunMigrations()` now take a `context.Context` instead of a `sdk.Context`. 
+* (x/crisis) [#16216](https://github.com/cosmos/cosmos-sdk/issues/16216) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` instead of panicking. +* (x/distribution) [#16211](https://github.com/cosmos/cosmos-sdk/pull/16211) Use collections for params state management. +* (cli) [#16209](https://github.com/cosmos/cosmos-sdk/pull/16209) Add API `StartCmdWithOptions` to create customized start command. +* (x/mint) [#16179](https://github.com/cosmos/cosmos-sdk/issues/16179) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error`. +* (x/gov) [#16171](https://github.com/cosmos/cosmos-sdk/pull/16171) Use collections for proposal state management (part 1): + * Removed: keeper: `GetProposal`, `UnmarshalProposal`, `MarshalProposal`, `IterateProposal`, `GetProposal`, `GetProposalFiltered`, `GetProposals`, `GetProposalID`, `SetProposalID` + * Removed: errors unused errors +* (x/gov) [#16164](https://github.com/cosmos/cosmos-sdk/pull/16164) Use collections for vote state management: + * Removed: types `VoteKey`, `VoteKeys` + * Removed: keeper `IterateVotes`, `IterateAllVotes`, `GetVotes`, `GetVote`, `SetVote` +* (sims) [#16155](https://github.com/cosmos/cosmos-sdk/pull/16155) + * `simulation.NewOperationMsg` now marshals the operation msg as proto bytes instead of legacy amino JSON bytes. + * `simulation.NewOperationMsg` is now 2-arity instead of 3-arity with the obsolete argument `codec.ProtoCodec` removed. + * The field `OperationMsg.Msg` is now of type `[]byte` instead of `json.RawMessage`. +* (x/gov) [#16127](https://github.com/cosmos/cosmos-sdk/pull/16127) Use collections for deposit state management: + * The following methods are removed from the gov keeper: `GetDeposit`, `GetAllDeposits`, `IterateAllDeposits`. 
+ * The following functions are removed from the gov types: `DepositKey`, `DepositsKey`.
+* (x/gov) [#16118](https://github.com/cosmos/cosmos-sdk/pull/16118/) Use collections for constitution and params state management.
+* (x/gov) [#16106](https://github.com/cosmos/cosmos-sdk/pull/16106) Remove gRPC query methods from gov keeper.
+* (x/*all*) [#16052](https://github.com/cosmos/cosmos-sdk/pull/16062) `GetSignBytes` implementations on messages and global legacy amino codec definitions have been removed from all modules.
+* (sims) [#16052](https://github.com/cosmos/cosmos-sdk/pull/16062) `GetOrGenerate` no longer requires a codec argument and is now 4-arity instead of 5-arity.
+* (types/math) [#16040](https://github.com/cosmos/cosmos-sdk/pull/16798) Remove aliases in `types/math.go` (part 2).
+* (types/math) [#16040](https://github.com/cosmos/cosmos-sdk/pull/16040) Remove aliases in `types/math.go` (part 1).
+* (x/auth) [#16016](https://github.com/cosmos/cosmos-sdk/pull/16016) Use collections for accounts state management:
+ * removed: keeper `HasAccountByID`, `AccountAddressByID`, `SetParams`
+* (x/genutil) [#15999](https://github.com/cosmos/cosmos-sdk/pull/15999) Genutil now takes the `GenesisTxHandler` interface instead of deliverTx. The interface is implemented on baseapp
+* (x/gov) [#15988](https://github.com/cosmos/cosmos-sdk/issues/15988) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context` and return an `error` (instead of panicking or returning a `found bool`). Iterators callback functions now return an error instead of a `bool`.
+* (x/auth) [#15985](https://github.com/cosmos/cosmos-sdk/pull/15985) The `AccountKeeper` does not expose the `QueryServer` and `MsgServer` APIs anymore. 
+* (x/authz) [#15962](https://github.com/cosmos/cosmos-sdk/issues/15962) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`, methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. The `Authorization` interface's `Accept` method now takes a `context.Context` instead of a `sdk.Context`. +* (x/distribution) [#15948](https://github.com/cosmos/cosmos-sdk/issues/15948) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. Keeper methods also now return an `error`. +* (x/bank) [#15891](https://github.com/cosmos/cosmos-sdk/issues/15891) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`. Also `FundAccount` and `FundModuleAccount` from the `testutil` package accept a `context.Context` instead of a `sdk.Context`, and it's position was moved to the first place. +* (x/slashing) [#15875](https://github.com/cosmos/cosmos-sdk/pull/15875) `x/slashing.NewAppModule` now requires an `InterfaceRegistry` parameter. +* (x/crisis) [#15852](https://github.com/cosmos/cosmos-sdk/pull/15852) Crisis keeper now takes a instance of the address codec to be able to decode user addresses +* (x/auth) [#15822](https://github.com/cosmos/cosmos-sdk/pull/15822) The type of struct field `ante.HandlerOptions.SignModeHandler` has been changed to `x/tx/signing.HandlerMap`. +* (client) [#15822](https://github.com/cosmos/cosmos-sdk/pull/15822) The return type of the interface method `TxConfig.SignModeHandler` has been changed to `x/tx/signing.HandlerMap`. + * The signature of `VerifySignature` has been changed to accept a `x/tx/signing.HandlerMap` and other structs from `x/tx` as arguments. + * The signature of `NewTxConfigWithTextual` has been deprecated and its signature changed to accept a `SignModeOptions`. 
+ * The signature of `NewSigVerificationDecorator` has been changed to accept a `x/tx/signing.HandlerMap`.
+* (x/bank) [#15818](https://github.com/cosmos/cosmos-sdk/issues/15818) `BaseViewKeeper`'s `Logger` method now doesn't require a context. `NewBaseKeeper`, `NewBaseSendKeeper` and `NewBaseViewKeeper` now also require a `log.Logger` to be passed in.
+* (x/genutil) [#15679](https://github.com/cosmos/cosmos-sdk/pull/15679) `MigrateGenesisCmd` now takes a `MigrationMap` instead of having the SDK genesis migration hardcoded.
+* (client) [#15673](https://github.com/cosmos/cosmos-sdk/pull/15673) Move `client/keys.OutputFormatJSON` and `client/keys.OutputFormatText` to `client/flags` package.
+* (x/*all*) [#15648](https://github.com/cosmos/cosmos-sdk/issues/15648) Make `SetParams` consistent across all modules and validate the params at the message handling instead of `SetParams` method.
+* (codec) [#15600](https://github.com/cosmos/cosmos-sdk/pull/15600) [#15873](https://github.com/cosmos/cosmos-sdk/pull/15873) add support for getting signers to `codec.Codec` and `InterfaceRegistry`:
+ * `InterfaceRegistry` has unexported methods and implements `protodesc.Resolver` plus the `RangeFiles` and `SigningContext` methods. All implementations of `InterfaceRegistry` by other users must now embed the official implementation.
+ * `Codec` has new methods `InterfaceRegistry`, `GetMsgAnySigners`, `GetMsgV1Signers`, and `GetMsgV2Signers` as well as unexported methods. All implementations of `Codec` by other users must now embed an official implementation from the `codec` package.
+ * `AminoCodec` is marked as deprecated and no longer implements `Codec`.
+* (client) [#15597](https://github.com/cosmos/cosmos-sdk/pull/15597) `RegisterNodeService` now requires a config parameter. 
+* (x/nft) [#15588](https://github.com/cosmos/cosmos-sdk/pull/15588) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`.
+* (baseapp) [#15568](https://github.com/cosmos/cosmos-sdk/pull/15568) `SetIAVLLazyLoading` is removed from baseapp.
+* (x/genutil) [#15567](https://github.com/cosmos/cosmos-sdk/pull/15567) `CollectGenTxsCmd` & `GenTxCmd` takes an address.Codec to be able to decode addresses.
+* (x/bank) [#15567](https://github.com/cosmos/cosmos-sdk/pull/15567) `GenesisBalance.GetAddress` now returns a string instead of `sdk.AccAddress`
+ * `MsgSendExec` test helper function now takes an address.Codec
+* (x/auth) [#15520](https://github.com/cosmos/cosmos-sdk/pull/15520) `NewAccountKeeper` now takes a `KVStoreService` instead of a `StoreKey` and methods in the `Keeper` now take a `context.Context` instead of a `sdk.Context`.
+* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) `runTxMode`s were renamed to `execMode`. `ModeDeliver` was changed to `ModeFinalize` and a new `ModeVoteExtension` was added for vote extensions.
+* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) Writing of state to the multistore was moved to `FinalizeBlock`. `Commit` still handles the committing values to disk.
+* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) Calls to BeginBlock and EndBlock have been replaced with core api beginblock & endblock.
+* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) BeginBlock and EndBlock are now internal to baseapp. For testing, user must call `FinalizeBlock`. BeginBlock and EndBlock calls are internal to Baseapp. 
+* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) All calls to ABCI methods now accept a pointer of the abci request and response types +* (x/consensus) [#15517](https://github.com/cosmos/cosmos-sdk/pull/15517) `NewKeeper` now takes a `KVStoreService` instead of a `StoreKey`. +* (x/bank) [#15477](https://github.com/cosmos/cosmos-sdk/pull/15477) `banktypes.NewMsgMultiSend` and `keeper.InputOutputCoins` only accept one input. +* (server) [#15358](https://github.com/cosmos/cosmos-sdk/pull/15358) Remove `server.ErrorCode` that was not used anywhere. +* (x/capability) [#15344](https://github.com/cosmos/cosmos-sdk/pull/15344) Capability module was removed and is now housed in [IBC-GO](https://github.com/cosmos/ibc-go). +* (mempool) [#15328](https://github.com/cosmos/cosmos-sdk/pull/15328) The `PriorityNonceMempool` is now generic over type `C comparable` and takes a single `PriorityNonceMempoolConfig[C]` argument. See `DefaultPriorityNonceMempoolConfig` for how to construct the configuration and a `TxPriority` type. +* [#15299](https://github.com/cosmos/cosmos-sdk/pull/15299) Remove `StdTx` transaction and signing APIs. No SDK version has actually supported `StdTx` since before Stargate. +* [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284) +* (x/gov) [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284) `NewKeeper` now requires `codec.Codec`. +* (x/authx) [#15284](https://github.com/cosmos/cosmos-sdk/pull/15284) `NewKeeper` now requires `codec.Codec`. + * `types/tx.Tx` no longer implements `sdk.Tx`. + * `sdk.Tx` now requires a new method `GetMsgsV2()`. + * `sdk.Msg.GetSigners` was deprecated and is no longer supported. Use the `cosmos.msg.v1.signer` protobuf annotation instead. + * `TxConfig` has a new method `SigningContext() *signing.Context`. + * `SigVerifiableTx.GetSigners()` now returns `([][]byte, error)` instead of `[]sdk.AccAddress`. 
+ * `AccountKeeper` now has an `AddressCodec() address.Codec` method and the expected `AccountKeeper` for `x/auth/ante` expects this method.
+* [#15211](https://github.com/cosmos/cosmos-sdk/pull/15211) Remove usage of `github.com/cometbft/cometbft/libs/bytes.HexBytes` in favor of `[]byte` throughout the SDK.
+* (crypto) [#15070](https://github.com/cosmos/cosmos-sdk/pull/15070) `GenerateFromPassword` and `Cost` from `bcrypt.go` now take a `uint32` instead of an `int` type.
+* (types) [#15067](https://github.com/cosmos/cosmos-sdk/pull/15067) Remove deprecated alias from `types/errors`. Use `cosmossdk.io/errors` instead.
+* (server) [#15041](https://github.com/cosmos/cosmos-sdk/pull/15041) Refactor how gRPC and API servers are started to remove unnecessary sleeps:
+ * `api.Server#Start` now accepts a `context.Context`. The caller is responsible for ensuring that the context is canceled such that the API server can gracefully exit. The caller does not need to stop the server.
+ * To start the gRPC server you must first create the server via `NewGRPCServer`, after which you can start the gRPC server via `StartGRPCServer` which accepts a `context.Context`. The caller is responsible for ensuring that the context is canceled such that the gRPC server can gracefully exit. The caller does not need to stop the server.
+ * Rename `WaitForQuitSignals` to `ListenForQuitSignals`. Note, this function is no longer blocking. Thus the caller is expected to provide a `context.CancelFunc` which indicates that when a signal is caught, that any spawned processes can gracefully exit.
+ * Remove `ServerStartTime` constant.
+* [#15011](https://github.com/cosmos/cosmos-sdk/pull/15011) All functions that were taking a CometBFT logger, now take `cosmossdk.io/log.Logger` instead.
+* (simapp) [#14977](https://github.com/cosmos/cosmos-sdk/pull/14977) Move simulation helpers functions (`AppStateFn` and `AppStateRandomizedFn`) to `testutil/sims`. 
These takes an extra genesisState argument which is the default state of the app. +* (x/bank) [#14894](https://github.com/cosmos/cosmos-sdk/pull/14894) Allow a human readable denomination for coins when querying bank balances. Added a `ResolveDenom` parameter to `types.QueryAllBalancesRequest`. +* [#14847](https://github.com/cosmos/cosmos-sdk/pull/14847) App and ModuleManager methods `InitGenesis`, `ExportGenesis`, `BeginBlock` and `EndBlock` now also return an error. +* (x/upgrade) [#14764](https://github.com/cosmos/cosmos-sdk/pull/14764) The `x/upgrade` module is extracted to have a separate go.mod file which allows it to be a standalone module. +* (x/auth) [#14758](https://github.com/cosmos/cosmos-sdk/pull/14758) Refactor transaction searching: + * Refactor `QueryTxsByEvents` to accept a `query` of type `string` instead of `events` of type `[]string` + * Refactor CLI methods to accept `--query` flag instead of `--events` + * Pass `prove=false` to Tendermint's `TxSearch` RPC method +* (simulation) [#14751](https://github.com/cosmos/cosmos-sdk/pull/14751) Remove the `MsgType` field from `simulation.OperationInput` struct. +* (store) [#14746](https://github.com/cosmos/cosmos-sdk/pull/14746) Extract Store in its own go.mod and rename the package to `cosmossdk.io/store`. +* (x/nft) [#14725](https://github.com/cosmos/cosmos-sdk/pull/14725) Extract NFT in its own go.mod and rename the package to `cosmossdk.io/x/nft`. +* (x/gov) [#14720](https://github.com/cosmos/cosmos-sdk/pull/14720) Add an expedited field in the gov v1 proposal and `MsgNewMsgProposal`. +* (x/feegrant) [#14649](https://github.com/cosmos/cosmos-sdk/pull/14649) Extract Feegrant in its own go.mod and rename the package to `cosmossdk.io/x/feegrant`. +* (tx) [#14634](https://github.com/cosmos/cosmos-sdk/pull/14634) Move the `tx` go module to `x/tx`. 
+* (store/streaming) [#14603](https://github.com/cosmos/cosmos-sdk/pull/14603) `StoreDecoderRegistry` moved from store to `types/simulations` this breaks the `AppModuleSimulation` interface.
+* (snapshots) [#14597](https://github.com/cosmos/cosmos-sdk/pull/14597) Move `snapshots` to `store/snapshots`, rename and bump proto package to v1.
+* (x/staking) [#14590](https://github.com/cosmos/cosmos-sdk/pull/14590) `MsgUndelegateResponse` now includes undelegated amount. `x/staking` module's `keeper.Undelegate` now returns 3 values (completionTime,undelegateAmount,error) instead of 2.
+* (crypto/keyring) [#14151](https://github.com/cosmos/cosmos-sdk/pull/14151) Move keys presentation from `crypto/keyring` to `client/keys`
+* (baseapp) [#14050](https://github.com/cosmos/cosmos-sdk/pull/14050) Refactor `ABCIListener` interface to accept Go contexts.
+* (x/auth) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850/) Remove `MarshalYAML` methods from module (`x/...`) types.
+* (modules) [#13850](https://github.com/cosmos/cosmos-sdk/pull/13850) and [#14046](https://github.com/cosmos/cosmos-sdk/pull/14046) Remove gogoproto stringer annotations. This removes the custom `String()` methods on all types that were using the annotations.
+* (x/evidence) [#14724](https://github.com/cosmos/cosmos-sdk/pull/14724) Extract Evidence in its own go.mod and rename the package to `cosmossdk.io/x/evidence`.
+* (crypto/keyring) [#13734](https://github.com/cosmos/cosmos-sdk/pull/13834) The keyring's `Sign` method now takes a new `signMode` argument. It is only used if the signing key is a Ledger hardware device. You can set it to 0 in all other cases.
+* (snapshots) [#14048](https://github.com/cosmos/cosmos-sdk/pull/14048) Move the Snapshot package to the store package. This is done in an effort to group all storage related logic under one package.
+* (signing) [#13701](https://github.com/cosmos/cosmos-sdk/pull/13701) Add `context.Context` as an argument `x/auth/signing.VerifySignature`. 
+* (store) [#11825](https://github.com/cosmos/cosmos-sdk/pull/11825) Make extension snapshotter interface safer to use, renamed the util function `WriteExtensionItem` to `WriteExtensionPayload`. + +### Client Breaking Changes + +* (x/gov) [#17910](https://github.com/cosmos/cosmos-sdk/pull/17910) Remove telemetry for counting votes and proposals. It was incorrectly counting votes. Use alternatives, such as state streaming. +* (abci) [#15845](https://github.com/cosmos/cosmos-sdk/pull/15845) Remove duplicating events in `logs`. +* (abci) [#15845](https://github.com/cosmos/cosmos-sdk/pull/15845) Add `msg_index` to all event attributes to associate events and messages. +* (x/staking) [#15701](https://github.com/cosmos/cosmos-sdk/pull/15701) `HistoricalInfoKey` now has a binary format. +* (store/streaming) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) State Streaming removed emitting of beginblock, endblock and delivertx in favour of emitting FinalizeBlock. +* (baseapp) [#15519](https://github.com/cosmos/cosmos-sdk/pull/15519/files) BeginBlock & EndBlock events have begin or endblock in the events in order to identify which stage they are emitted from since they are returned to comet as FinalizeBlock events. +* (grpc-web) [#14652](https://github.com/cosmos/cosmos-sdk/pull/14652) Use same port for gRPC-Web and the API server. + +### CLI Breaking Changes + +* (all) The migration of modules to [AutoCLI](https://docs.cosmos.network/main/core/autocli) led to no changes in UX but a [small change in CLI outputs](https://github.com/cosmos/cosmos-sdk/issues/16651) where results can be nested. 
+* (all) Query pagination flags have been renamed with the migration to AutoCLI: + * `--reverse` -> `--page-reverse` + * `--offset` -> `--page-offset` + * `--limit` -> `--page-limit` + * `--count-total` -> `--page-count-total` +* (cli) [#17184](https://github.com/cosmos/cosmos-sdk/pull/17184) All json keys returned by the `status` command are now snake case instead of pascal case. +* (server) [#17177](https://github.com/cosmos/cosmos-sdk/pull/17177) Remove `iavl-lazy-loading` configuration. +* (x/gov) [#16987](https://github.com/cosmos/cosmos-sdk/pull/16987) In ` query gov proposals` the proposal status flag have renamed from `--status` to `--proposal-status`. Additionally, that flags now uses the ENUM values: `PROPOSAL_STATUS_DEPOSIT_PERIOD`, `PROPOSAL_STATUS_VOTING_PERIOD`, `PROPOSAL_STATUS_PASSED`, `PROPOSAL_STATUS_REJECTED`, `PROPOSAL_STATUS_FAILED`. +* (x/bank) [#16899](https://github.com/cosmos/cosmos-sdk/pull/16899) With the migration to AutoCLI some bank commands have been split in two: + * Use `total-supply` (or `total`) for querying the total supply and `total-supply-of` for querying the supply of a specific denom. + * Use `denoms-metadata` for querying all denom metadata and `denom-metadata` for querying a specific denom metadata. +* (rosetta) [#16276](https://github.com/cosmos/cosmos-sdk/issues/16276) Rosetta migration to standalone repo. +* (cli) [#15826](https://github.com/cosmos/cosmos-sdk/pull/15826) Remove ` q account` command. Use ` q auth account` instead. +* (cli) [#15299](https://github.com/cosmos/cosmos-sdk/pull/15299) Remove `--amino` flag from `sign` and `multi-sign` commands. Amino `StdTx` has been deprecated for a while. Amino JSON signing still works as expected. +* (x/gov) [#14880](https://github.com/cosmos/cosmos-sdk/pull/14880) Remove ` tx gov submit-legacy-proposal cancel-software-upgrade` and `software-upgrade` commands. These commands are now in the `x/upgrade` module and using gov v1. Use `tx upgrade software-upgrade` instead. 
+* (x/staking) [#14864](https://github.com/cosmos/cosmos-sdk/pull/14864) ` tx staking create-validator` CLI command now takes a json file as an arg instead of using required flags. +* (cli) [#14659](https://github.com/cosmos/cosmos-sdk/pull/14659) ` q block ` is removed as it just output json. The new command allows either height/hash and is ` q block --type=height|hash `. +* (grpc-web) [#14652](https://github.com/cosmos/cosmos-sdk/pull/14652) Remove `grpc-web.address` flag. +* (client) [#14342](https://github.com/cosmos/cosmos-sdk/pull/14342) ` config` command is now a sub-command using Confix. Use ` config --help` to learn more. + +### Bug Fixes + +* (server) [#18254](https://github.com/cosmos/cosmos-sdk/pull/18254) Don't hardcode gRPC address to localhost. +* (x/gov) [#18173](https://github.com/cosmos/cosmos-sdk/pull/18173) Gov hooks now return an error and are *blocking* when they fail. Expect for `AfterProposalFailedMinDeposit` and `AfterProposalVotingPeriodEnded` which log the error and continue. +* (x/gov) [#17873](https://github.com/cosmos/cosmos-sdk/pull/17873) Fail any inactive and active proposals that cannot be decoded. +* (x/slashing) [#18016](https://github.com/cosmos/cosmos-sdk/pull/18016) Fixed builder function for missed blocks key (`validatorMissedBlockBitArrayPrefixKey`) in slashing/migration/v4. +* (x/bank) [#18107](https://github.com/cosmos/cosmos-sdk/pull/18107) Add missing keypair of SendEnabled to restore legacy param set before migration. +* (baseapp) [#17769](https://github.com/cosmos/cosmos-sdk/pull/17769) Ensure we respect block size constraints in the `DefaultProposalHandler`'s `PrepareProposal` handler when a nil or no-op mempool is used. We provide a `TxSelector` type to assist in making transaction selection generalized. We also fix a comparison bug in tx selection when `req.maxTxBytes` is reached. 
+* (mempool) [#17668](https://github.com/cosmos/cosmos-sdk/pull/17668) Fix `PriorityNonceIterator.Next()` nil pointer ref for min priority at the end of iteration. +* (config) [#17649](https://github.com/cosmos/cosmos-sdk/pull/17649) Fix `mempool.max-txs` configuration is invalid in `app.config`. +* (baseapp) [#17518](https://github.com/cosmos/cosmos-sdk/pull/17518) Utilizing voting power from vote extensions (CometBFT) instead of the current bonded tokens (x/staking) to determine if a set of vote extensions are valid. +* (baseapp) [#17251](https://github.com/cosmos/cosmos-sdk/pull/17251) VerifyVoteExtensions and ExtendVote initialize their own contexts/states, allowing VerifyVoteExtensions being called without ExtendVote. +* (x/distribution) [#17236](https://github.com/cosmos/cosmos-sdk/pull/17236) Using "validateCommunityTax" in "Params.ValidateBasic", preventing panic when field "CommunityTax" is nil. +* (x/bank) [#17170](https://github.com/cosmos/cosmos-sdk/pull/17170) Avoid empty spendable error message on send coins. +* (x/group) [#17146](https://github.com/cosmos/cosmos-sdk/pull/17146) Rename x/group legacy ORM package's error codespace from "orm" to "legacy_orm", preventing collisions with ORM's error codespace "orm". +* (types/query) [#16905](https://github.com/cosmos/cosmos-sdk/pull/16905) Collections Pagination now applies proper count when filtering results. +* (x/bank) [#16841](https://github.com/cosmos/cosmos-sdk/pull/16841) Correctly process legacy `DenomAddressIndex` values. +* (x/auth/vesting) [#16733](https://github.com/cosmos/cosmos-sdk/pull/16733) Panic on overflowing and negative EndTimes when creating a PeriodicVestingAccount. +* (x/consensus) [#16713](https://github.com/cosmos/cosmos-sdk/pull/16713) Add missing ABCI param in `MsgUpdateParams`. +* (baseapp) [#16700](https://github.com/cosmos/cosmos-sdk/pull/16700) Fix consensus failure in returning no response to malformed transactions. 
+* [#16639](https://github.com/cosmos/cosmos-sdk/pull/16639) Make sure we don't execute blocks beyond the halt height. +* (baseapp) [#16613](https://github.com/cosmos/cosmos-sdk/pull/16613) Ensure each message in a transaction has a registered handler, otherwise `CheckTx` will fail. +* (baseapp) [#16596](https://github.com/cosmos/cosmos-sdk/pull/16596) Return error during `ExtendVote` and `VerifyVoteExtension` if the request height is earlier than `VoteExtensionsEnableHeight`. +* (baseapp) [#16259](https://github.com/cosmos/cosmos-sdk/pull/16259) Ensure the `Context` block height is correct after `InitChain` and prior to the second block. +* (x/gov) [#16231](https://github.com/cosmos/cosmos-sdk/pull/16231) Fix Rawlog JSON formatting of proposal_vote option field. +* (cli) [#16138](https://github.com/cosmos/cosmos-sdk/pull/16138) Fix snapshot commands panic if the snapshot doesn't exist. +* (x/staking) [#16043](https://github.com/cosmos/cosmos-sdk/pull/16043) Call `AfterUnbondingInitiated` hook for new unbonding entries only and fix `UnbondingDelegation` entries handling. This is a behavior change compared to Cosmos SDK v0.47.x, now the hook is called only for new unbonding entries. +* (types) [#16010](https://github.com/cosmos/cosmos-sdk/pull/16010) Let `module.CoreAppModuleBasicAdaptor` fall back to legacy genesis handling. +* (types) [#15691](https://github.com/cosmos/cosmos-sdk/pull/15691) Make `Coin.Validate()` check that `.Amount` is not nil. +* (x/crypto) [#15258](https://github.com/cosmos/cosmos-sdk/pull/15258) Write keyhash file with permissions 0600 instead of 0555. +* (x/auth) [#15059](https://github.com/cosmos/cosmos-sdk/pull/15059) `ante.CountSubKeys` returns 0 when passing a nil `Pubkey`. 
+* (x/capability) [#15030](https://github.com/cosmos/cosmos-sdk/pull/15030) Prevent `x/capability` from consuming `GasMeter` gas during `InitMemStore` +* (types/coin) [#14739](https://github.com/cosmos/cosmos-sdk/pull/14739) Deprecate the method `Coin.IsEqual` in favour of `Coin.Equal`. The difference between the two methods is that the first one results in a panic when denoms are not equal. This panic led to unexpected behavior. + +### Deprecated + +* (types) [#16980](https://github.com/cosmos/cosmos-sdk/pull/16980) Deprecate `IntProto` and `DecProto`. Instead, `math.Int` and `math.LegacyDec` should be used respectively. Both types support `Marshal` and `Unmarshal` for binary serialization. +* (x/staking) [#14567](https://github.com/cosmos/cosmos-sdk/pull/14567) The `delegator_address` field of `MsgCreateValidator` has been deprecated. + The validator address bytes and delegator address bytes refer to the same account while creating validator (differ only in bech32 notation). + +## Previous Versions + +[CHANGELOG of previous versions](https://github.com/cosmos/cosmos-sdk/blob/main/CHANGELOG.md#v0470---2023-03-14). diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..f91befe --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at community@interchain.io. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/CODING_GUIDELINES.md b/CODING_GUIDELINES.md new file mode 100644 index 0000000..7d3c932 --- /dev/null +++ b/CODING_GUIDELINES.md @@ -0,0 +1,152 @@ +# Coding Guidelines + +This document is an extension to [CONTRIBUTING](./CONTRIBUTING.md) and provides more details about the coding guidelines and requirements. + +## API & Design + +* Code must be well structured: + * packages must have a limited responsibility (different concerns can go to different packages), + * types must be easy to compose, + * think about maintainability and testability. +* "Depend upon abstractions, [not] concretions". +* Try to limit the number of methods you are exposing. It's easier to expose something later than to hide it. +* Take advantage of `internal` package concept. +* Follow agreed-upon design patterns and naming conventions. +* Publicly-exposed functions are named logically, have forward-thinking arguments and return types. +* Avoid global variables and global configurators. +* Favor composable and extensible designs. 
+* Minimize code duplication. +* Limit third-party dependencies. + +Performance: + +* Avoid unnecessary operations or memory allocations. + +Security: + +* Pay proper attention to exploits involving: + * gas usage + * transaction verification and signatures + * malleability + * code must be always deterministic +* Thread safety. If some functionality is not thread-safe, or uses something that is not thread-safe, then clearly indicate the risk on each level. + +## Acceptance tests + +Start the design by defining Acceptance Tests. The purpose of Acceptance Testing is to +validate that the product being developed corresponds to the needs of the real users +and is ready for launch. Hence we often talk about **User Acceptance Test** (UAT). +It also gives a better understanding of the product and helps designing a right interface +and API. + +UAT should be revisited at each stage of the product development: + +![acceptance-tests.png](./docs/static/img/acceptance-tests.png) + +### Why Acceptance Testing + +* Automated acceptance tests catch serious problems that unit or component test suites could never catch. +* Automated acceptance tests deliver business value the users are expecting as they test user scenarios. +* Automated acceptance tests executed and passed on every build help improve the software delivery process. +* Testers, developers, and customers need to work closely to create suitable automated acceptance test suites. + +### How to define Acceptance Test + +The best way to define AT is by starting from the user stories and think about all positive and negative scenarios a user can perform. + +Product Developers should collaborate with stakeholders to define AT. Functional experts and business users are both needed for defining AT. + +A good pattern for defining AT is listing scenarios with [GIVEN-WHEN-THEN](https://martinfowler.com/bliki/GivenWhenThen.html) format where: + +* **GIVEN**: A set of initial circumstances (e.g. 
bank balance) +* **WHEN**: Some event happens (e.g. customer attempts a transfer) +* **THEN**: The expected result as per the defined behavior of the system + +In other words: we define a use case input, current state and the expected outcome. Example: + +> Feature: User trades stocks. +> Scenario: User requests a sell before close of trading +> +> Given I have 100 shares of MSFT stock +> And I have 150 shares of APPL stock +> And the time is before close of trading +> +> When I ask to sell 20 shares of MSFT stock +> +> Then I should have 80 shares of MSFT stock +> And I should have 150 shares of APPL stock +> And a sell order for 20 shares of MSFT stock should have been executed + +*Reference: [writing acceptance tests](https://openclassrooms.com/en/courses/4544611-write-agile-documentation-user-stories-acceptance-tests/4810081-writing-acceptance-tests)*. + +### How and where to add acceptance tests + +Acceptance tests are written in the Markdown format, using the scenario template described above, and be part of the specification (`xx_test.md` file in *spec* directory). Example: [`eco-credits/spec/06.test.md`](https://github.com/regen-network/regen-ledger/blob/7297783577e6cd102c5093365b573163680f36a1/x/ecocredit/spec/06_tests.md). + +Acceptance tests should be defined during the design phase or at an early stage of development. Moreover, they should be defined before writing a module architecture - it will clarify the purpose and usage of the software. +Automated tests should cover all acceptance tests scenarios. + +## Automated Tests + +Make sure your code is well tested: + +* Provide unit tests for every unit of your code if possible. Unit tests are expected to comprise 70%-80% of your tests. +* Describe the test scenarios you are implementing for integration tests. +* Create integration tests for queries and msgs. +* Use both test cases and property / fuzzy testing. We use the [rapid](pgregory.net/rapid) Go library for property-based and fuzzy testing. 
+* Do not decrease code test coverage. Explain in a PR if test coverage is decreased. + +We expect tests to use `require` or `assert` rather than `t.Skip` or `t.Fail`, +unless there is a reason to do otherwise. +When testing a function under a variety of different inputs, we prefer to use +[table driven tests](https://github.com/golang/go/wiki/TableDrivenTests). +Table driven test error messages should follow the following format +`<desc>, tc #<index>, i #<iteration>`. +`<desc>` is an optional short description of what's failing, `tc` is the +index within the test case table that is failing, and `i` is when there +is a loop, exactly which iteration of the loop failed. +The idea is you should be able to see the +error message and figure out exactly what failed. +Here is an example check: + +```go + +for tcIndex, tc := range cases { + + resp, err := doSomething() + require.NoError(t, err) + require.Equal(t, tc.expected, resp, "should correctly perform X") +} +``` + +## Quality Assurance + +We are forming a QA team that will support the core Cosmos SDK team and collaborators by: + +* Improving the Cosmos SDK QA Processes +* Improving automation in QA and testing +* Defining high-quality metrics +* Maintaining and improving testing frameworks (unit tests, integration tests, and functional tests) +* Defining test scenarios. +* Verifying user experience and defining a high quality. + * We want to have **acceptance tests**! Document and list acceptance tests that are implemented and identify acceptance tests that are still missing. + * Acceptance tests should be specified in `acceptance-tests` directory as Markdown files. +* Supporting other teams with testing frameworks, automation, and User Experience testing. +* Testing chain upgrades for every new breaking change. + * Defining automated tests that assure data integrity after an update. + +Desired outcomes: + +* QA team works with Development Team. +* QA is happening in parallel with Core Cosmos SDK development. +* Releases are more predictable. 
+* QA reports. Goal is to guide with new tasks and be one of the QA measures. + +As a developer, you must help the QA team by providing instructions for User Experience (UX) and functional testing. + +### QA Team to cross check Acceptance Tests + +Once the AT are defined, the QA team will have an overview of the behavior a user can expect and: + +* validate the user experience will be good +* validate the implementation conforms to the acceptance tests +* by having a broader overview of the use cases, QA team should be able to define **test suites** and test data to efficiently automate Acceptance Tests and reuse the work. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..85342ba --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,355 @@ +# Contributing + +* [Teams Dev Calls](#teams-dev-calls) +* [Architecture Decision Records (ADR)](#architecture-decision-records-adr) +* [Development Procedure](#development-procedure) + * [Testing](#testing) + * [Pull Requests](#pull-requests) + * [Pull Request Templates](#pull-request-templates) + * [Requesting Reviews](#requesting-reviews) + * [Updating Documentation](#updating-documentation) + * [RFC & ADR](#rfc--adr) +* [Dependencies](#dependencies) + * [`go.work`](#gowork) + * [`go.mod`](#gomod) +* [Protobuf](#protobuf) +* [Branching Model and Release](#branching-model-and-release) + * [PR Targeting](#pr-targeting) +* [Code Owner Membership](#code-owner-membership) +* [Concept & Feature Approval Process](#concept--feature-approval-process) + * [Strategy Discovery](#strategy-discovery) + * [Concept Approval](#concept-approval) + * [Time Bound Period](#time-bound-period) + * [Approval Committee & Decision Making](#approval-committee--decision-making) + * [Committee Members](#committee-members) + * [Committee Criteria](#committee-criteria) + * [Implementation & Release Approval](#implementation--release-approval) + +Thank you for considering making contributions to the Cosmos SDK and related 
repositories! + +Contributing to this repo can mean many things, such as participating in +discussion or proposing code changes. To ensure a smooth workflow for all +contributors, the general procedure for contributing has been established: + +1. Start by browsing [new issues](https://github.com/cosmos/cosmos-sdk/issues) and [discussions](https://github.com/cosmos/cosmos-sdk/discussions). If you are looking for something interesting or if you have something on your mind, there is a chance it has been discussed. + * Looking for a good place to start contributing? How about checking out some [good first issues](https://github.com/cosmos/cosmos-sdk/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) or [bugs](https://github.com/cosmos/cosmos-sdk/issues?q=is%3Aopen+is%3Aissue+label%3A%22T%3A+Bug%22)? +2. Determine whether a GitHub issue or discussion is more appropriate for your needs: + 1. If you want to propose something new that requires specification or an additional design, or you would like to change a process, start with a [new discussion](https://github.com/cosmos/cosmos-sdk/discussions/new). With discussions, we can better handle the design process using discussion threads. A discussion usually leads to one or more issues. + 2. If the issue you want addressed is a specific proposal or a bug, then open a [new issue](https://github.com/cosmos/cosmos-sdk/issues/new/choose). + 3. Review existing [issues](https://github.com/cosmos/cosmos-sdk/issues) to find an issue you'd like to help with. +3. Participate in thoughtful discussion on that issue. +4. If you would like to contribute: + 1. Ensure that the proposal has been accepted. + 2. Ensure that nobody else has already begun working on this issue. If they have, + make sure to contact them to collaborate. + 3. If nobody has been assigned for the issue and you would like to work on it, + make a comment on the issue to inform the community of your intentions + to begin work. +5. 
To submit your work as a contribution to the repository follow standard GitHub best practices. See [pull request guideline](#pull-requests) below. + +**Note:** For very small or blatantly obvious problems such as typos, you are +not required to open an issue to submit a PR, but be aware that for more complex +problems/features, if a PR is opened before an adequate design discussion has +taken place in a GitHub issue, that PR runs a high likelihood of being rejected. + +## Teams Dev Calls + +The Cosmos SDK has many stakeholders contributing and shaping the project. The Core SDK team is composed of Interchain GmbH and Regen Network Development developers. Any long-term contributors and additional maintainers from other projects are welcome. We use self-organizing principles to coordinate and collaborate across organizations in structured "EPICs" that focus on specific problem domains or architectural components of the Cosmos SDK. + +The developers work in sprints, which are available in a [GitHub Project](https://github.com/orgs/cosmos/projects/26/views/22). The current EPICs are pinned at the top of the [issues list](https://github.com/cosmos/cosmos-sdk/issues). + +The important development announcements are shared on [Discord](https://discord.com/invite/cosmosnetwork) in the `#dev-announcements` channel. + +To synchronize we have a few major meetings: + +* Cosmos SDK Sprint Review on Monday and Thursday at 14:00 UTC (limited participation to core devs). +* Cosmos SDK Community Call on Thursday at 16:00 UTC. + +If you would like to join one of the community calls, then please contact us on [Discord](https://discord.com/invite/cosmosnetwork) or reach out directly to Marko (@tac0turtle). 
+ +## Architecture Decision Records (ADR) + +When proposing an architecture decision for the Cosmos SDK, please start by opening an [issue](https://github.com/cosmos/cosmos-sdk/issues/new/choose) or a [discussion](https://github.com/cosmos/cosmos-sdk/discussions/new) with a summary of the proposal. Once the proposal has been discussed and there is rough alignment on a high-level approach to the design, the [ADR creation process](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/PROCESS.md) can begin. We are following this process to ensure all involved parties are in agreement before any party begins coding the proposed implementation. If you would like to see examples of how these are written, please refer to the current [ADRs](https://github.com/cosmos/cosmos-sdk/tree/main/docs/architecture). + +## Development Procedure + +* The latest state of development is on `main`. +* `main` must never fail `make lint test test-race`. +* No `--force` onto `main` (except when reverting a broken commit, which should seldom happen). +* Create a branch to start work: + * Fork the repo (core developers must create a branch directly in the Cosmos SDK repo), + branch from the HEAD of `main`, make some commits, and submit a PR to `main`. + * For core developers working within the `cosmos-sdk` repo, follow branch name conventions to ensure a clear + ownership of branches: `{moniker}/{issue#}-branch-name`. + * See [Branching Model](#branching-model-and-release) for more details. +* Be sure to run `make format` before every commit. The easiest way + to do this is have your editor run it for you upon saving a file (most of the editors + will do it anyway using a pre-configured setup of the programming language mode). + Additionally, be sure that your code is lint compliant by running `make lint-fix`. + A convenience git `pre-commit` hook that runs the formatters automatically + before each commit is available in the `contrib/githooks/` directory. 
+* Follow the [CODING GUIDELINES](CODING_GUIDELINES.md), which defines criteria for designing and coding software. + +Code is merged into main through the pull request procedure. + +### Testing + +Tests can be executed by running `make test` at the top level of the Cosmos SDK repository. + +### Pull Requests + +Before submitting a pull request: + +* merge the latest main `git merge origin/main`, +* run `make lint test` to ensure that all checks and tests pass. + +Then: + +1. If you have something to show, **start with a `Draft` PR**. It's good to have early validation of your work and we highly recommend this practice. A Draft PR also indicates to the community that the work is in progress. + Draft PRs also help the core team provide early feedback and ensure the work is in the right direction. +2. When the code is complete, change your PR from `Draft` to `Ready for Review`. +3. Go through the actions for each checkbox present in the PR template description. The PR actions are automatically provided for each new PR. +4. Be sure to include a relevant changelog entry in the `Unreleased` section of `CHANGELOG.md` (see file for log format). The entry should be on top of all other changes in the section. + +PRs must have a category prefix that is based on the type of changes being made (for example, `fix`, `feat`, +`refactor`, `docs`, and so on). The *type* must be included in the PR title as a prefix (for example, +`fix: `). This convention ensures that all changes that are committed to the base branch follow the +[Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification. +Additionally, each PR should only address a single issue. + +Pull requests are merged automatically using [`A:automerge` action](https://docs.mergify.com/workflow/automerge/). + +NOTE: when merging, GitHub will squash commits and rebase on top of main. + +### Pull Request Templates + +There are three PR templates. 
The [default template](./.github/PULL_REQUEST_TEMPLATE.md) is for types `fix`, `feat`, and `refactor`. We also have a [docs template](./.github/PULL_REQUEST_TEMPLATE/docs.md) for documentation changes and an [other template](./.github/PULL_REQUEST_TEMPLATE/other.md) for changes that do not affect production code. When previewing a PR before it has been opened, you can change the template by adding one of the following parameters to the url: + +* `template=docs.md` +* `template=other.md` + +### Requesting Reviews + +In order to accommodate the review process, the author of the PR must complete the author checklist +(from the pull request template) +to the best of their abilities before marking the PR as "Ready for Review". If you would like to +receive early feedback on the PR, open the PR as a "Draft" and leave a comment in the PR indicating +that you would like early feedback and tagging whoever you would like to receive feedback from. + +Codeowners are marked automatically as the reviewers. + +All PRs require at least two review approvals before they can be merged (one review might be acceptable in +the case of minor changes to [docs](./.github/PULL_REQUEST_TEMPLATE/docs.md) or [other](./.github/PULL_REQUEST_TEMPLATE/other.md) changes that do not affect production code). Each PR template has a reviewers checklist that must be completed before the PR can be merged. Each reviewer is responsible +for all checked items unless they have indicated otherwise by leaving their handle next to specific +items. In addition, use the following review explanations: + +* `LGTM` without an explicit approval means that the changes look good, but you haven't thoroughly reviewed the reviewer checklist items. +* `Approval` means that you have completed some or all of the reviewer checklist items. If you only reviewed selected items, you must add your handle next to the items that you have reviewed. 
In addition, follow these guidelines: + * You must also think through anything which ought to be included but is not + * You must think through whether any added code could be partially combined (DRYed) with existing code + * You must think through any potential security issues or incentive-compatibility flaws introduced by the changes + * Naming must be consistent with conventions and the rest of the codebase + * Code must live in a reasonable location, considering dependency structures (for example, not importing testing modules in production code, or including example code modules in production code). + * If you approve the PR, you are responsible for any issues mentioned here and any issues that should have been addressed after thoroughly reviewing the reviewer checklist items in the pull request template. +* If you sat down with the PR submitter and did a pairing review, add this information in the `Approval` or your PR comments. +* If you are only making "surface level" reviews, submit notes as a `comment` review. + +### Updating Documentation + +If you open a PR on the Cosmos SDK, it is mandatory to update the relevant documentation in `/docs`. + +* If your change relates to the core SDK (baseapp, store, ...), be sure to update the content in `docs/basics/`, `docs/core/` and/or `docs/building-modules/` folders. +* If your changes relate to the core of the CLI (not specifically to module's CLI/Rest), then modify the content in the `docs/run-node/` folder. +* If your changes relate to a module, then be sure to update the module's spec in `x/{moduleName}/README.md`. + +When writing documentation, follow the [Documentation Writing Guidelines](./docs/DOC_WRITING_GUIDELINES.md). + +### RFC & ADR + +Within the Cosmos SDK we have two forms of documenting decisions, Request For Comment (RFC) & Architecture Design Record (ADR). They perform two different functions. 
The process for assessing if something needs an RFC is located in the respective folders: + +* [RFC Process](./docs/rfc/process.md) +* [ADR Process](./docs/adr/process.md) + + +## Dependencies + +We use [Go Modules](https://github.com/golang/go/wiki/Modules) to manage +dependency versions. + +The main branch of every Cosmos repository should just build with `go get`, +which means they should be kept up-to-date with their dependencies, so we can +get away with telling people they can just `go get` our software. + +Since some dependencies are not under our control, a third party may break our +build, in which case we can fall back on `go mod tidy -v`. + +### `go.mod` + +When extracting a package to its own go modules, some extra steps are required, for keeping our CI checks and Dev UX: + +* Add a CHANGELOG.md / README.md under the new package folder +* Add the package in [`labeler.yml`](./.github/labeler.yml) +* Add weekly dependabot checks (see [dependabot.yml](./.github/dependabot.yml)) +* Add tests to github workflow [test.yml](.github/workflows/test.yml) (under submodules) +* (optional) Configure a `cosmossdk.io` vanity url by submitting a PR to [cosmos/vanity](https://github.com/cosmos/vanity). + +## Protobuf + +We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/cosmos/gogoproto) to generate code for use in Cosmos SDK. + +For deterministic behavior around Protobuf tooling, everything is containerized using Docker. Make sure to have Docker installed on your machine, or head to [Docker's website](https://docs.docker.com/get-docker/) to install it. + +For formatting code in `.proto` files, you can run `make proto-format` command. + +For linting and checking breaking changes, we use [buf](https://buf.build/). You can use the commands `make proto-lint` and `make proto-check-breaking` to respectively lint your proto files and check for breaking changes. 
+ +To generate the protobuf stubs, you can run `make proto-gen`. + +We also added the `make proto-all` command to run all the above commands sequentially. + +In order for imports to properly compile in your IDE, you may need to manually set your protobuf path in your IDE's workspace settings/config. + +For example, in vscode your `.vscode/settings.json` should look like: + +```json +{ + "protoc": { + "options": [ + "--proto_path=${workspaceRoot}/proto", + ] + } +} +``` + +## Branching Model and Release + +User-facing repos should adhere to the trunk based development branching model: https://trunkbaseddevelopment.com. User branches should start with a user name, example: `{moniker}/{issue#}-branch-name`. + +The Cosmos SDK repository is a [multi Go module](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository) repository. It means that we have more than one Go module in a single repository. + +The Cosmos SDK utilizes [semantic versioning](https://semver.org/). + +### PR Targeting + +Ensure that you base and target your PR on the `main` branch. + +All feature additions and all bug fixes must be targeted against `main`. Exception is for bug fixes which are only related to a released version. In that case, the related bug fix PRs must target against the release branch. + +If needed, we backport a commit from `main` to a release branch (excluding consensus breaking feature, API breaking and similar). + +## Code Owner Membership + +In the ethos of open-source projects, and out of necessity to keep the code +alive, the core contributor team will strive to permit special repo privileges +to developers who show an aptitude towards developing with this code base. + +Several different kinds of privileges may be granted however most common +privileges to be granted are merge rights to either part of, or the entirety of the +code base (through the GitHub `CODEOWNERS` file). 
The on-boarding process for
+new code owners is as follows: On a bi-monthly basis (or more frequently if
+agreeable) all the existing code owners will privately convene to discuss
+potential new candidates as well as the potential for existing code-owners to
+exit or "pass on the torch". This private meeting is to be held as a
+phone/video meeting.
+
+Subsequently after the meeting, and pending final approval from the ICF,
+one of the existing code owners should open a PR modifying the `CODEOWNERS` file.
+The other code owners should then all approve this PR to publicly display their support.
+
+Only if unanimous consensus is reached among all the existing code-owners will
+an invitation be extended to a new potential-member. Likewise, when an existing
+member is suggested to be removed or have their privileges reduced, the member
+in question must agree to the decision for their removal or else no action
+should be taken. If however, a code-owner is demonstrably shown to have
+intentionally acted maliciously or with gross negligence, code-owner privileges may be
+stripped with no prior warning or consent from the member in question.
+
+Other potential removal criteria:
+
+* Missing 3 scheduled meetings results in ICF evaluating whether the member should be
+  removed / replaced
+* Violation of Code of Conduct
+
+Earning this privilege should be considered to be no small feat and is by no
+means guaranteed by any quantifiable metric. Serving as a code owner is a symbol of great trust from
+the community of this project.
+ +## Concept & Feature Approval Process + +The process for how Cosmos SDK maintainers take features and ADRs from concept to release +is broken up into three distinct stages: **Strategy Discovery**, **Concept Approval**, and +**Implementation & Release Approval** + +### Strategy Discovery + +* Develop long term priorities, strategy and roadmap for the Cosmos SDK +* Release committee not yet defined as there is already a roadmap that can be used for the time being + +### Concept Approval + +* Architecture Decision Records (ADRs) may be proposed by any contributors or maintainers of the Cosmos SDK, + and should follow the guidelines outlined in the + [ADR Creation Process](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/PROCESS.md) +* After proposal, a time bound period for Request for Comment (RFC) on ADRs commences +* ADRs are intended to be iterative, and may be merged into `main` while still in a `Proposed` status + +#### Time Bound Period + +* Once a PR for an ADR is opened, reviewers are expected to perform a first review within 1 week of pull request being open +* Time bound period for individual ADR Pull Requests to be merged should not exceed 2 weeks +* Total time bound period for an ADR to reach a decision (`ABANDONED | ACCEPTED | REJECTED`) should not exceed 4 weeks + +If an individual Pull Request for an ADR needs more time than 2 weeks to reach resolution, it should be merged +in current state (`Draft` or `Proposed`), with its contents updated to summarize +the current state of its discussion. 
+
+If an ADR is taking longer than 4 weeks to reach a final conclusion, the **Concept Approval Committee**
+should convene to rectify the situation by either:
+
+* unanimously setting a new time bound period for this ADR
+* making changes to the Concept Approval Process (as outlined here)
+* making changes to the members of the Concept Approval Committee
+
+#### Approval Committee & Decision Making
+
+In absence of general consensus, decision making requires 1/2 vote from the two members
+of the **Concept Approval Committee**.
+
+#### Committee Members
+
+* Core Members: **Aaron** (Regen), **Bez** (IG)
+
+#### Committee Criteria
+
+Members must:
+
+* Participate in all or almost all ADR discussions, both on GitHub as well as in bi-weekly Architecture Review
+  meetings
+* Be active contributors to the Cosmos SDK, and furthermore should be continuously making substantial contributions
+  to the project's codebase, review process, documentation and ADRs
+* Have stake in the Cosmos SDK project, represented by:
+  * Being a client / user of the Cosmos SDK
+  * "[giving back](https://www.debian.org/social_contract)" to the software
+* Delegate representation in case of vacation or absence
+
+Code owners need to maintain participation in the process, ideally as **Concept Approval Committee**
+members, but at the very least as active participants in ADR discussions.
+
+Removal criteria:
+
+* Missing 3 meetings results in ICF evaluating whether the member should be removed / replaced
+* Violation of Code of Conduct
+
+### Implementation & Release Approval
+
+The following process should be adhered to both for implementation PRs corresponding to ADRs, as
+well as for PRs made as part of a release process:
+
+* Code reviewers should ensure the PR does exactly what the ADR said it should
+* Code reviewers should have more senior engineering capability
+* 1/2 approval is required from the **primary repo maintainers** in `CODEOWNERS`
+
+**Note**: For any major release
series denoted as a "Stable Release" (e.g. v0.42 "Stargate"), a separate release +committee is often established. Stable Releases, and their corresponding release committees are documented +separately in [Stable Release Policy](./RELEASE_PROCESS.md#stable-release-policy)* diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..197b3ac --- /dev/null +++ b/Dockerfile @@ -0,0 +1,55 @@ +# Simple usage with a mounted data directory: +# > docker build -t simapp . +# +# Server: +# > docker run -it -p 26657:26657 -p 26656:26656 -v ~/.simapp:/root/.simapp simapp simd init test-chain +# TODO: need to set validator in genesis so start runs +# > docker run -it -p 26657:26657 -p 26656:26656 -v ~/.simapp:/root/.simapp simapp simd start +# +# Client: (Note the simapp binary always looks at ~/.simapp we can bind to different local storage) +# > docker run -it -p 26657:26657 -p 26656:26656 -v ~/.simappcli:/root/.simapp simapp simd keys add foo +# > docker run -it -p 26657:26657 -p 26656:26656 -v ~/.simappcli:/root/.simapp simapp simd keys list +# +# This image is pushed to the GHCR as https://ghcr.io/cosmos/simapp + +FROM golang:1.25-alpine AS build-env + +# Install minimum necessary dependencies +ENV PACKAGES curl make git libc-dev bash gcc linux-headers eudev-dev +RUN apk add --no-cache $PACKAGES + +# Set working directory for the build +WORKDIR /go/src/github.com/cosmos/cosmos-sdk + +# optimization: if go.sum didn't change, docker will use cached image +COPY go.mod go.sum ./ +COPY collections/go.mod collections/go.sum ./collections/ +COPY store/go.mod store/go.sum ./store/ +COPY log/go.mod log/go.sum ./log/ + +RUN go mod download + +# Add source files +COPY . . 
+ +# Dockerfile Cross-Compilation Guide +# https://www.docker.com/blog/faster-multi-platform-builds-dockerfile-cross-compilation-guide +ARG TARGETOS TARGETARCH + +# install simapp, remove packages +RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make build + +# Use alpine:3 as a base image +FROM alpine:3 + +EXPOSE 26656 26657 1317 9090 +# Run simd by default, omit entrypoint to ease using container with simcli +CMD ["simd"] +STOPSIGNAL SIGTERM +WORKDIR /root + +# Install minimum necessary dependencies +RUN apk add --no-cache curl make bash jq sed + +# Copy over binaries from the build-env +COPY --from=build-env /go/src/github.com/cosmos/cosmos-sdk/build/simd /usr/bin/simd diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..a5cec31 --- /dev/null +++ b/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. 
If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. 
Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4e6ebee --- /dev/null +++ b/Makefile @@ -0,0 +1,536 @@ +#!/usr/bin/make -f + +PACKAGES_NOSIMULATION=$(shell go list ./... | grep -v '/simulation') +PACKAGES_SIMTEST=$(shell go list ./... | grep '/simulation') + +# Ensure all tags are fetched +VERSION_RAW := $(shell git fetch --tags --force >/dev/null 2>&1; git describe --tags --always --match "v*") +VERSION := $(shell echo $(VERSION_RAW) | sed -E 's/^v?([0-9]+\.[0-9]+\.[0-9]+.*)/\1/') + +# Fallback if the version is just a commit hash (not semver-like) +ifeq ($(findstring -,$(VERSION)),) # No "-" means it's just a hash + VERSION := 0.0.0-$(VERSION_RAW) +endif +export VERSION +export CMTVERSION := $(shell go list -m github.com/cometbft/cometbft | sed 's:.* ::') +export COMMIT := $(shell git log -1 --format='%H') +LEDGER_ENABLED ?= true +BINDIR ?= $(GOPATH)/bin +BUILDDIR ?= $(CURDIR)/build +SIMAPP = ./simapp +MOCKS_DIR = $(CURDIR)/tests/mocks +HTTPS_GIT := https://github.com/cosmos/cosmos-sdk.git +DOCKER := $(shell which docker) +PROJECT_NAME = $(shell git remote get-url origin | xargs basename -s .git) + +# process build tags +build_tags = netgo +ifeq ($(LEDGER_ENABLED),true) + ifeq ($(OS),Windows_NT) + GCCEXE = $(shell where gcc.exe 2> NUL) + ifeq ($(GCCEXE),) + $(error gcc.exe not installed for ledger support, please install or set LEDGER_ENABLED=false) + else + build_tags += ledger + endif + else + UNAME_S = $(shell uname -s) + ifeq ($(UNAME_S),OpenBSD) + $(warning OpenBSD detected, disabling ledger support (https://github.com/cosmos/cosmos-sdk/issues/1988)) + else + GCC = $(shell command -v gcc 2> /dev/null) + ifeq ($(GCC),) + $(error gcc not installed for ledger support, please install or set LEDGER_ENABLED=false) + else + build_tags += ledger + endif + endif + endif +endif + +ifeq (secp,$(findstring secp,$(COSMOS_BUILD_OPTIONS))) + build_tags += libsecp256k1_sdk +endif + +ifeq (legacy,$(findstring legacy,$(COSMOS_BUILD_OPTIONS))) + build_tags 
+= app_v1 +endif + +whitespace := +whitespace += $(whitespace) +comma := , +build_tags_comma_sep := $(subst $(whitespace),$(comma),$(build_tags)) + +# process linker flags +ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \ + -X github.com/cosmos/cosmos-sdk/version.AppName=simd \ + -X github.com/cosmos/cosmos-sdk/version.Version=$(VERSION) \ + -X github.com/cosmos/cosmos-sdk/version.Commit=$(COMMIT) \ + -X "github.com/cosmos/cosmos-sdk/version.BuildTags=$(build_tags_comma_sep)" \ + -X github.com/cometbft/cometbft/version.TMCoreSemVer=$(CMTVERSION) + +# DB backend selection +ifeq (cleveldb,$(findstring cleveldb,$(COSMOS_BUILD_OPTIONS))) + build_tags += gcc +endif +ifeq (badgerdb,$(findstring badgerdb,$(COSMOS_BUILD_OPTIONS))) + build_tags += badgerdb +endif +# handle rocksdb +ifeq (rocksdb,$(findstring rocksdb,$(COSMOS_BUILD_OPTIONS))) + CGO_ENABLED=1 + build_tags += rocksdb +endif +# handle boltdb +ifeq (boltdb,$(findstring boltdb,$(COSMOS_BUILD_OPTIONS))) + build_tags += boltdb +endif + +ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS))) + ldflags += -w -s +endif +ldflags += $(LDFLAGS) +ldflags := $(strip $(ldflags)) + +build_tags += $(BUILD_TAGS) +build_tags := $(strip $(build_tags)) + +BUILD_FLAGS := -tags "$(build_tags)" -ldflags '$(ldflags)' +# check for nostrip option +ifeq (,$(findstring nostrip,$(COSMOS_BUILD_OPTIONS))) + BUILD_FLAGS += -trimpath +endif + +# Check for debug option +ifeq (debug,$(findstring debug,$(COSMOS_BUILD_OPTIONS))) + BUILD_FLAGS += -gcflags "all=-N -l" +endif + +all: tools build lint test vulncheck + +############################################################################### +### Build ### +############################################################################### + +BUILD_TARGETS := build install + +build: BUILD_ARGS=-o $(BUILDDIR)/ + +build-linux-amd64: + GOOS=linux GOARCH=amd64 LEDGER_ENABLED=false $(MAKE) build + +build-linux-arm64: + GOOS=linux GOARCH=arm64 LEDGER_ENABLED=false $(MAKE) build + 
+$(BUILD_TARGETS): go.sum $(BUILDDIR)/ + cd ${CURRENT_DIR}/simapp && go $@ -mod=readonly $(BUILD_FLAGS) $(BUILD_ARGS) ./... + +$(BUILDDIR)/: + mkdir -p $(BUILDDIR)/ + +cosmovisor: + $(MAKE) -C tools/cosmovisor cosmovisor + +confix: + $(MAKE) -C tools/confix confix + +hubl: + $(MAKE) -C tools/hubl hubl + +.PHONY: build build-linux-amd64 build-linux-arm64 cosmovisor confix + + +#? mocks: Generate mock file +mocks: $(MOCKS_DIR) + @go install go.uber.org/mock/mockgen@v0.5.0 + sh ./scripts/mockgen.sh +.PHONY: mocks + + +vulncheck: $(BUILDDIR)/ + GOBIN=$(BUILDDIR) go install golang.org/x/vuln/cmd/govulncheck@latest + $(BUILDDIR)/govulncheck ./... + +$(MOCKS_DIR): + mkdir -p $(MOCKS_DIR) + +distclean: clean tools-clean +clean: + rm -rf \ + $(BUILDDIR)/ \ + artifacts/ \ + tmp-swagger-gen/ \ + .testnets + +.PHONY: distclean clean + +############################################################################### +### Tools & Dependencies ### +############################################################################### + +go.sum: go.mod + echo "Ensure dependencies have not been modified ..." 
>&2 + go mod verify + go mod tidy + +############################################################################### +### Documentation ### +############################################################################### + +godocs: + @echo "--> Wait a few seconds and visit http://localhost:6060/pkg/github.com/cosmos/cosmos-sdk/types" + go install golang.org/x/tools/cmd/godoc@latest + godoc -http=:6060 + +build-docs: + @cd docs && DOCS_DOMAIN=docs.cosmos.network sh ./build-all.sh + +.PHONY: build-docs + +############################################################################### +### Tests & Simulation ### +############################################################################### + +# make init-simapp initializes a single local node network +# it is useful for testing and development +# Usage: make install && make init-simapp && simd start +# Warning: make init-simapp will remove all data in simapp home directory +init-simapp: + ./scripts/init-simapp.sh + +test: test-unit +test-e2e: + $(MAKE) -C tests test-e2e +test-e2e-cov: + $(MAKE) -C tests test-e2e-cov +test-integration: + $(MAKE) -C tests test-integration +test-integration-cov: + $(MAKE) -C tests test-integration-cov +test-all: test-unit test-e2e test-integration test-ledger-mock test-race + +TEST_PACKAGES=./... +TEST_TARGETS := test-unit test-unit-amino test-unit-proto test-ledger-mock test-race test-ledger test-race + +# Test runs-specific rules. To add a new test target, just add +# a new rule, customise ARGS or TEST_PACKAGES ad libitum, and +# append the new rule to the TEST_TARGETS list. 
+test-unit: test_tags += cgo ledger test_ledger_mock norace +test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace +test-ledger: test_tags += cgo ledger norace +test-ledger-mock: test_tags += ledger test_ledger_mock norace +test-race: test_tags += cgo ledger test_ledger_mock +test-race: ARGS=-race +test-race: TEST_PACKAGES=$(PACKAGES_NOSIMULATION) +$(TEST_TARGETS): run-tests + +# check-* compiles and collects tests without running them +# note: go test -c doesn't support multiple packages yet (https://github.com/golang/go/issues/15513) +CHECK_TEST_TARGETS := check-test-unit check-test-unit-amino +check-test-unit: test_tags += cgo ledger test_ledger_mock norace +check-test-unit-amino: test_tags += ledger test_ledger_mock test_amino norace +$(CHECK_TEST_TARGETS): EXTRA_ARGS=-run=none +$(CHECK_TEST_TARGETS): run-tests + +ARGS += -tags "$(test_tags)" +SUB_MODULES = $(shell find . -type f -name 'go.mod' -print0 | xargs -0 -n1 dirname | sort | grep -v './tests/systemtests') +CURRENT_DIR = $(shell pwd) +run-tests: + @(cd store/streaming/abci/examples/file && go build .) +ifneq (,$(shell which tparse 2>/dev/null)) + @echo "Starting unit tests"; \ + finalec=0; \ + for module in $(SUB_MODULES); do \ + cd ${CURRENT_DIR}/$$module; \ + echo "Running unit tests for $$(grep '^module' go.mod)"; \ + go test -mod=readonly -json $(ARGS) $(TEST_PACKAGES) ./... | tparse; \ + ec=$$?; \ + if [ "$$ec" -ne '0' ]; then finalec=$$ec; fi; \ + done; \ + exit $$finalec +else + @echo "Starting unit tests"; \ + finalec=0; \ + for module in $(SUB_MODULES); do \ + cd ${CURRENT_DIR}/$$module; \ + echo "Running unit tests for $$(grep '^module' go.mod)"; \ + go test -mod=readonly $(ARGS) $(TEST_PACKAGES) ./... ; \ + ec=$$?; \ + if [ "$$ec" -ne '0' ]; then finalec=$$ec; fi; \ + done; \ + exit $$finalec +endif + +.PHONY: run-tests test test-all $(TEST_TARGETS) + +test-sim-nondeterminism: + @echo "Running non-determinism test..." 
+ @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout=30m -tags='sims' -run TestAppStateDeterminism \ + -NumBlocks=100 -BlockSize=200 -Period=0 + +# Requires an exported plugin. See store/streaming/README.md for documentation. +# +# example: +# export COSMOS_SDK_ABCI_V1= +# make test-sim-nondeterminism-streaming +# +# Using the built-in examples: +# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file +# make test-sim-nondeterminism-streaming +test-sim-nondeterminism-streaming: + @echo "Running non-determinism-streaming test..." + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout=30m -tags='sims' -run TestAppStateDeterminism \ + -NumBlocks=100 -BlockSize=200 -Period=0 -EnableStreaming=true + +test-sim-custom-genesis-fast: + @echo "Running custom genesis simulation..." + @echo "By default, ${HOME}/.simapp/config/genesis.json will be used." + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout=30m -tags='sims' -run TestFullAppSimulation -Genesis=${HOME}/.simapp/config/genesis.json \ + -NumBlocks=100 -BlockSize=200 -Seed=99 -Period=5 -SigverifyTx=false + +test-sim-import-export: + @echo "Running application import/export simulation. This may take several minutes..." + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout 20m -tags='sims' -run TestAppImportExport \ + -NumBlocks=50 -Period=5 + +test-sim-after-import: + @echo "Running application simulation-after-import. This may take several minutes..." + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout 30m -tags='sims' -run TestAppSimulationAfterImport \ + -NumBlocks=50 -Period=5 + +test-sim-custom-genesis-multi-seed: + @echo "Running multi-seed custom genesis simulation..." + @echo "By default, ${HOME}/.simapp/config/genesis.json will be used." 
+ @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout 30m -tags='sims' -run TestFullAppSimulation -Genesis=${HOME}/.simapp/config/genesis.json \ + -NumBlocks=400 -Period=5 + +test-sim-multi-seed-long: + @echo "Running long multi-seed application simulation. This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout=1h -tags='sims' -run TestFullAppSimulation \ + -NumBlocks=500 -Period=50 + +test-sim-multi-seed-short: + @echo "Running short multi-seed application simulation. This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -timeout 30m -tags='sims' -run TestFullAppSimulation \ + -NumBlocks=50 -Period=10 + +.PHONY: \ +test-sim-nondeterminism \ +test-sim-nondeterminism-streaming \ +test-sim-custom-genesis-fast \ +test-sim-import-export \ +test-sim-after-import \ +test-sim-custom-genesis-multi-seed \ +test-sim-multi-seed-short \ +test-sim-multi-seed-long + +SIM_NUM_BLOCKS ?= 500 +SIM_BLOCK_SIZE ?= 200 +SIM_COMMIT ?= true + +#? test-sim-fuzz: Run fuzz test for simapp +test-sim-fuzz: + @echo "Running application fuzz for numBlocks=2, blockSize=20. This may take awhile!" +#ld flags are a quick fix to make it work on current osx + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -json -tags='sims' -ldflags="-extldflags=-Wl,-ld_classic" -timeout=60m -fuzztime=60m -run=^$$ -fuzz=FuzzFullAppSimulation -GenesisTime=1714720615 -NumBlocks=2 -BlockSize=20 + +#? test-sim-benchmark: Run benchmark test for simapp +test-sim-benchmark: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -tags='sims' -run=^$$ $(.) -bench ^BenchmarkFullAppSimulation$$ \ + -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -Seed=57 -timeout 30m + +# Requires an exported plugin. See store/streaming/README.md for documentation. 
+# +# example: +# export COSMOS_SDK_ABCI_V1= +# make test-sim-benchmark-streaming +# +# Using the built-in examples: +# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file +# make test-sim-benchmark-streaming +test-sim-benchmark-streaming: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -run=^$$ $(.) -bench ^BenchmarkFullAppSimulation$$ \ + -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -EnableStreaming=true + +test-sim-profile: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -benchmem -run=^$$ $(.) -bench ^BenchmarkFullAppSimulation$$ \ + -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out + +# Requires an exported plugin. See store/streaming/README.md for documentation. +# +# example: +# export COSMOS_SDK_ABCI_V1= +# make test-sim-profile-streaming +# +# Using the built-in examples: +# export COSMOS_SDK_ABCI_V1=/store/streaming/abci/examples/file/file +# make test-sim-profile-streaming +test-sim-profile-streaming: + @echo "Running application benchmark for numBlocks=$(SIM_NUM_BLOCKS), blockSize=$(SIM_BLOCK_SIZE). This may take awhile!" + @cd ${CURRENT_DIR}/simapp && go test -failfast -mod=readonly -benchmem -run=^$$ $(.) -bench ^BenchmarkFullAppSimulation$$ \ + -NumBlocks=$(SIM_NUM_BLOCKS) -BlockSize=$(SIM_BLOCK_SIZE) -Commit=$(SIM_COMMIT) -timeout 24h -cpuprofile cpu.out -memprofile mem.out -EnableStreaming=true + +.PHONY: test-sim-profile test-sim-benchmark test-sim-fuzz + +benchmark: + @go test -mod=readonly -bench=. 
$(PACKAGES_NOSIMULATION) +.PHONY: benchmark + +############################################################################### +### Linting ### +############################################################################### + +golangci_version=v2.6.1 + +lint-install: + @echo "--> Installing golangci-lint $(golangci_version)" + @go install github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(golangci_version) + +lint: + @echo "--> Running linter on all files" + $(MAKE) lint-install + @./scripts/go-lint-all.bash --timeout=15m + +lint-fix: + @echo "--> Running linter" + $(MAKE) lint-install + @./scripts/go-lint-all.bash --fix + +.PHONY: lint lint-fix + +############################################################################### +### Protobuf ### +############################################################################### + +protoVer=0.16.0 +protoImageName=ghcr.io/cosmos/proto-builder:$(protoVer) +protoImage=$(DOCKER) run --rm -v $(CURDIR):/workspace --workdir /workspace $(protoImageName) + +proto-all: proto-format proto-lint proto-gen + +proto-gen: + @echo "Generating Protobuf files" + @$(protoImage) sh ./scripts/protocgen.sh + +proto-swagger-gen: + @echo "Generating Protobuf Swagger" + @$(protoImage) sh ./scripts/protoc-swagger-gen.sh + +proto-format: + @$(protoImage) find ./ -name "*.proto" -exec clang-format -i {} \; + +proto-lint: + @$(protoImage) buf lint --error-format=json + +proto-check-breaking: + @$(protoImage) buf breaking --against $(HTTPS_GIT)#branch=main + +CMT_URL = https://raw.githubusercontent.com/cometbft/cometbft/v0.38.0/proto/tendermint + +CMT_CRYPTO_TYPES = proto/tendermint/crypto +CMT_ABCI_TYPES = proto/tendermint/abci +CMT_TYPES = proto/tendermint/types +CMT_VERSION = proto/tendermint/version +CMT_LIBS = proto/tendermint/libs/bits +CMT_P2P = proto/tendermint/p2p + +proto-update-deps: + @echo "Updating Protobuf dependencies" + + @mkdir -p $(CMT_ABCI_TYPES) + @curl -sSL $(CMT_URL)/abci/types.proto > $(CMT_ABCI_TYPES)/types.proto + + 
@mkdir -p $(CMT_VERSION) + @curl -sSL $(CMT_URL)/version/types.proto > $(CMT_VERSION)/types.proto + + @mkdir -p $(CMT_TYPES) + @curl -sSL $(CMT_URL)/types/types.proto > $(CMT_TYPES)/types.proto + @curl -sSL $(CMT_URL)/types/evidence.proto > $(CMT_TYPES)/evidence.proto + @curl -sSL $(CMT_URL)/types/params.proto > $(CMT_TYPES)/params.proto + @curl -sSL $(CMT_URL)/types/validator.proto > $(CMT_TYPES)/validator.proto + @curl -sSL $(CMT_URL)/types/block.proto > $(CMT_TYPES)/block.proto + + @mkdir -p $(CMT_CRYPTO_TYPES) + @curl -sSL $(CMT_URL)/crypto/proof.proto > $(CMT_CRYPTO_TYPES)/proof.proto + @curl -sSL $(CMT_URL)/crypto/keys.proto > $(CMT_CRYPTO_TYPES)/keys.proto + + @mkdir -p $(CMT_LIBS) + @curl -sSL $(CMT_URL)/libs/bits/types.proto > $(CMT_LIBS)/types.proto + + @mkdir -p $(CMT_P2P) + @curl -sSL $(CMT_URL)/p2p/types.proto > $(CMT_P2P)/types.proto + + $(DOCKER) run --rm -v $(CURDIR)/proto:/workspace --workdir /workspace $(protoImageName) buf mod update + +.PHONY: proto-all proto-gen proto-swagger-gen proto-format proto-lint proto-check-breaking proto-update-deps + +############################################################################### +### Localnet ### +############################################################################### + +localnet-build-env: + $(MAKE) -C contrib/images simd-env +localnet-build-dlv: + $(MAKE) -C contrib/images simd-dlv + +localnet-build-nodes: + $(DOCKER) run --rm -v $(CURDIR)/.testnets:/data cosmossdk/simd \ + testnet init-files --validator-count 4 -o /data --starting-ip-address 192.168.10.2 --keyring-backend=test + docker compose up -d + +localnet-stop: + docker compose down + +# localnet-start will run a 4-node testnet locally. 
The nodes are +# based off the docker images in: ./contrib/images/simd-env +localnet-start: localnet-stop localnet-build-env localnet-build-nodes + +# localnet-debug will run a 4-node testnet locally in debug mode +# you can read more about the debug mode here: ./contrib/images/simd-dlv/README.md +localnet-debug: localnet-stop localnet-build-dlv localnet-build-nodes + +.PHONY: localnet-start localnet-stop localnet-debug localnet-build-env localnet-build-dlv localnet-build-nodes + +test-system: build-v50 build + mkdir -p ./tests/systemtests/binaries/ + cp $(BUILDDIR)/simd ./tests/systemtests/binaries/ + mkdir -p ./tests/systemtests/binaries/v0.50 + mv $(BUILDDIR)/simdv50 ./tests/systemtests/binaries/v0.50/simd + $(MAKE) -C tests/systemtests test +.PHONY: test-system + +# build-v50 checks out the v0.50.x branch, builds the binary, and renames it to simdv50. +build-v50: + @echo "Starting v50 build process..." + git_status=$$(git status --porcelain) && \ + has_changes=false && \ + if [ -n "$$git_status" ]; then \ + echo "Stashing uncommitted changes..." && \ + git stash push -m "Temporary stash for v50 build" && \ + has_changes=true; \ + else \ + echo "No changes to stash"; \ + fi && \ + echo "Saving current reference..." && \ + CURRENT_REF=$$(git symbolic-ref --short HEAD 2>/dev/null || git rev-parse HEAD) && \ + echo "Checking out release branch..." && \ + git checkout release/v0.50.x && \ + echo "Building v50 binary..." && \ + make build && \ + mv build/simd build/simdv50 && \ + echo "Returning to original branch..." && \ + if [ "$$CURRENT_REF" = "HEAD" ]; then \ + git checkout $$(git rev-parse HEAD); \ + else \ + git checkout $$CURRENT_REF; \ + fi && \ + if [ "$$has_changes" = "true" ]; then \ + echo "Reapplying stashed changes..." 
&& \ + git stash pop || echo "Warning: Could not pop stash, your changes may be in the stash list"; \ + else \ + echo "No changes to reapply"; \ + fi +.PHONY: build-v50 \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..7befd96 --- /dev/null +++ b/README.md @@ -0,0 +1,33 @@ +

+ Mukan SDK +

+ +

+ A highly customized, sovereign fork of the Cosmos SDK built exclusively for the Mukan Network. +

+ +## Overview + +**Mukan SDK** is the core blockchain framework that powers the Mukan Network. It is a deliberate and permanent hard-fork of the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk), surgically modified to enforce the **Fair Birth** and **Zero Initial Supply** consensus rules of the Mukan Network. + +### Key Architectural Differences from Cosmos SDK + +Unlike standard Cosmos chains which require an initial Proof-of-Stake (PoS) bond to bootstrap the network, the Mukan SDK introduces a fundamental paradigm shift: **"Labor over Capital"**. + +- **Zero-Supply Genesis:** Hardcoded bypass of the `IsPositive()` bonding requirements, allowing validators to join the network and produce blocks with exactly `0 UMC`. +- **Labor-Backed Consensus Power:** Modified `TokensToConsensusPower` logic ensuring that validators with `0 UMC` still possess a minimum voting power of `1`, transitioning the early network into a pure Proof-of-Work (PoW) and Proof-of-Justice (PoJ) environment. +- **Phased PoS Activation:** The traditional Proof-of-Stake mechanics are kept dormant until the initial target supply (1000 MC) is mined through the PoJ module. + +## Integration + +The Mukan SDK is specifically designed to be imported by the `mukan-core` daemon. + +```go +replace github.com/cosmos/cosmos-sdk => git.cw.tr/mukan-network/mukan-sdk +``` + +## License + +Mukan SDK is licensed under the **GNU General Public License v3.0 (GPLv3)**. This ensures that any network built on this framework remains open, fair, and prevents corporate monopolization of the code. 
+ +*Original Cosmos SDK components remain under their respective Apache 2.0 licenses where applicable, but the Mukan SDK fork as a whole is distributed under GPLv3.* diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md new file mode 100644 index 0000000..1922aba --- /dev/null +++ b/RELEASE_NOTES.md @@ -0,0 +1,9 @@ +# Cosmos SDK v0.53.6 Release Notes + +## 🚀 Highlights + +This patch release includes minor dependency bumps and functionality additions. + +## 📝 Changelog + +Check out the [changelog](https://github.com/cosmos/cosmos-sdk/blob/v0.53.6/CHANGELOG.md) for an exhaustive list of changes or [compare changes](https://github.com/cosmos/cosmos-sdk/compare/v0.53.5...v0.53.6) from the last release. \ No newline at end of file diff --git a/RELEASE_PROCESS.md b/RELEASE_PROCESS.md new file mode 100644 index 0000000..adc04ef --- /dev/null +++ b/RELEASE_PROCESS.md @@ -0,0 +1,248 @@ +# Release Process + +This document outlines the process for releasing a new version of Cosmos SDK, which involves major release and patch releases as well as maintenance for the major release. + +> **Note, the Cosmos SDK went directly from v0.47 to v0.50 and skipped the v0.48 and v0.49 versions.** + +## Major Release Procedure + +A _major release_ is an increment of the first number (eg: `v1.2` → `v2.0.0`) or the _point number_ (eg: `v1.1.0 → v1.2.0`, also called _point release_). Each major release opens a _stable release series_ and receives updates outlined in the [Major Release Maintenance](#major-release-maintenance)_section. + +Before making a new _major_ release we do beta and release candidate releases. For example, for release 1.0.0: + +```text +v1.0.0-beta1 → v1.0.0-beta2 → ... → v1.0.0-rc1 → v1.0.0-rc2 → ... → v1.0.0 +``` + +* Release a first beta version on the `main` branch and freeze `main` from receiving any new features. After beta is released, we focus on releasing the release candidate: + * finish audits and reviews + * kick off a large round of simulation testing (e.g. 
400 seeds for 2k blocks) + * perform functional tests + * add more tests + * release new beta version as the bugs are discovered and fixed. +* After the team feels that the `main` works fine we create a `release/vY` branch (going forward known as a release branch), where `Y` is the version number, with the patch part substituted to `x` (eg: 0.42.x, 1.0.x). Ensure the release branch is protected so that pushes against the release branch are permitted only by the release manager or release coordinator. + * **PRs targeting this branch can be merged _only_ when exceptional circumstances arise** + * update the GitHub mergify integration by adding instructions for automatically backporting commits from `main` to the `release/vY` using the `backport/Y` label. +* In the release branch prepare a new version section in the `CHANGELOG.md` + * All links must point to their respective pull request. + * The `CHANGELOG.md` must contain only the changes of that specific released version. All other changelog entries must be deleted and linked to the `main` branch changelog ([example](https://github.com/cosmos/cosmos-sdk/blob/release/v0.46.x/CHANGELOG.md#previous-versions)). + * Create release notes, in `RELEASE_NOTES.md`, highlighting the new features and changes in the version. This is needed so the bot knows which entries to add to the release page on GitHub. + * Additionally verify that the `UPGRADING.md` file is up to date and contains all the necessary information for upgrading to the new version. +* Remove GitHub workflows that should not be in the release branch + * `deploy-docs.yml`: must be removed to avoid duplicate documentation deployment. + * `test.yml`: All standalone go module tests should be removed (except `./simapp`, and `./tests` and SDK tests). + * These packages are tracked and tested directly on main. + * `build.yml`: Only the SDK and SimApp need to be built on release branches. + * Tooling is tracked and tested directly on main. 
+* Create a new annotated git tag for a release candidate (eg: `git tag -a v1.1.0-rc1`) in the release branch. + * from this point we unfreeze main. + * the SDK teams collaborate and do their best to run testnets in order to validate the release. + * when bugs are found, create a PR for `main`, and backport fixes to the release branch. + * create new release candidate tags after bugs are fixed. +* After the team feels the release branch is stable and everything works, create a full release: + * update `CHANGELOG.md`. + * run `gofumpt -w -l .` to format the code. + * create a new annotated git tag (eg `git tag -a v1.1.0`) in the release branch. + * Create a GitHub release. + +Following _semver_ philosophy, point releases after `v1.0`: + +* must not break API +* can break consensus + +Before `v1.0`, point releases can break both API and consensus. + +## Patch Release Procedure + +A _patch release_ is an increment of the patch number (eg: `v1.2.0` → `v1.2.1`). + +**Patch release must not break API nor consensus.** + +Updates to the release branch should come from `main` by backporting PRs (usually done by automatic cherry pick followed by a PR to the release branch). The backports must be marked using `backport/Y` label in PR for main. +It is the PR author's responsibility to fix merge conflicts, update changelog entries, and +ensure CI passes. If a PR originates from an external contributor, a core team member assumes +responsibility to perform this process instead of the original author. +Lastly, it is core team's responsibility to ensure that the PR meets all the SRU criteria. + +Point Release must follow the [Stable Release Policy](#stable-release-policy). + +After the release branch has all commits required for the next patch release: + +* Update `CHANGELOG.md` and `RELEASE_NOTES.md` (if applicable). +* Create a new annotated git tag (eg `git tag -a v1.1.0`) in the release branch. 
+ * If the release is a submodule update, first go to the submodule folder and name the tag prepending the path to the version: + `cd core && git tag -a core/v1.1.0` or `cd tools/cosmovisor && git tag -a tools/cosmovisor/v1.4.0` +* Create a GitHub release (if applicable). + +## Major Release Maintenance + +Major Release series continue to receive bug fixes (released as a Patch Release) until they reach **End Of Life**. +Major Release series is maintained in compliance with the **Stable Release Policy** as described in this document. + +Only the following major release series have a stable release status: + +* **0.46** is the previous major release and is supported until the release of **0.50.0**. A fairly strict **bugfix-only** rule applies to pull requests that are requested to be included into a not latest stable point-release. +* **0.47** is the last major release and is supported until the release of **0.51.0**. + +The SDK team maintains the last two major releases; any other major release is considered to have reached end of life. +The SDK team will not backport any bug fixes to releases that are not supported. +Widely-used (decided at SDK team's discretion) unsupported releases are considered to be in a security maintenance mode. The SDK team will backport security fixes to these releases. + +## Stable Release Policy + +### Patch Releases + +Once a Cosmos-SDK release has been completed and published, updates for it are released under certain circumstances +and must follow the [Patch Release Procedure](CONTRIBUTING.md#branching-model-and-release). + +### Rationale + +Unlike in-development `main` branch snapshots, **Cosmos SDK** releases are subject to much wider adoption, +and by a significantly different demographic of users. During development, changes in the `main` branch +affect SDK users, application developers, early adopters, and other advanced users that elect to use +unstable experimental software at their own risk. 
+ +Conversely, users of a stable release expect a high degree of stability. They build their applications on it, and the +problems they experience with it could be potentially highly disruptive to their projects. + +Stable release updates are recommended to the vast majority of developers, and so it is crucial to treat them +with great caution. Hence, when updates are proposed, they must be accompanied by a strong rationale and present +a low risk of regressions, i.e. even one-line changes could cause unexpected regressions due to side effects or +poorly tested code. We never assume that any change, no matter how little or non-intrusive, is completely exempt +of regression risks. + +Therefore, the requirements for stable changes are different than those that are candidates to be merged in +the `main` branch. When preparing future major releases, our aim is to design the most elegant, user-friendly and +maintainable SDK possible which often entails fundamental changes to the SDK's architecture design, rearranging and/or +renaming packages as well as reducing code duplication so that we maintain common functions and data structures in one +place rather than leaving them scattered all over the code base. However, once a release is published, the +priority is to minimize the risk caused by changes that are not strictly required to fix qualifying bugs; this tends to +be correlated with minimizing the size of such changes. As such, the same bug may need to be fixed in different +ways in stable releases and `main` branch. + +### Migrations + +See the SDK's policy on migrations [here](https://docs.cosmos.network/main/migrations/intro). + +### What qualifies as a Stable Release Update (SRU) + +* **High-impact bugs** + * Bugs that may directly cause a security vulnerability. + * _Severe regressions_ from a Cosmos-SDK's previous release. This includes all sort of issues + that may cause the core packages or the `x/` modules unusable. 
+ * Bugs that may cause **loss of user's data**. +* Other safe cases: + * Bugs which don't fit in the aforementioned categories for which an obvious safe patch is known. + * Relatively small yet strictly non-breaking features with strong support from the community. + * Relatively small yet strictly non-breaking changes that introduce forward-compatible client + features to smoothen the migration to successive releases. + * Relatively small yet strictly non-breaking CLI improvements. + +### What does not qualify as SRU + +* State machine changes. +* Breaking changes in Protobuf definitions, as specified in [ADR-044](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-044-protobuf-updates-guidelines.md). +* Changes that introduces API breakages (e.g. public functions and interfaces removal/renaming). +* Client-breaking changes in gRPC and HTTP request and response types. +* CLI-breaking changes. +* Cosmetic fixes, such as formatting or linter warning fixes. + +### What pull requests will be included in stable point-releases + +Pull requests that fix bugs and add features that fall in the following categories do not require a **Stable Release Exception** to be granted to be included in a stable point-release: + +* **Severe regressions**. +* Bugs that may cause **client applications** to be **largely unusable**. +* Bugs that may cause **state corruption or data loss**. +* Bugs that may directly or indirectly cause a **security vulnerability**. +* Non-breaking features that are strongly requested by the community. +* Non-breaking CLI improvements that are strongly requested by the community. + +### What pull requests will NOT be automatically included in stable point-releases + +As rule of thumb, the following changes will **NOT** be automatically accepted into stable point-releases: + +* **State machine changes**. 
+* **Protobuf-breaking changes**, as specified in [ADR-044](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-044-protobuf-updates-guidelines.md). +* **Client-breaking changes**, i.e. changes that prevent gRPC, HTTP and RPC clients from continuing to interact with the node without any change. +* **API-breaking changes**, i.e. changes that prevent client applications from _building without modifications_ to the client application's source code. +* **CLI-breaking changes**, i.e. changes that require usage changes for CLI users. + + In some circumstances, PRs that don't meet the aforementioned criteria might be raised and asked to be granted a _Stable Release Exception_. + +### Stable Release Exception - Procedure + +1. Check that the bug is either fixed or not reproducible in `main`. It is, in general, not appropriate to release bug fixes for stable releases without first testing them in `main`. Please apply the label [v0.43](https://github.com/cosmos/cosmos-sdk/milestone/26) to the issue. +2. Add a comment to the issue and ensure it contains the following information (see the bug template below): + + * **[Impact]** An explanation of the bug on users and justification for backporting the fix to the stable release. + * A **[Test Case]** section containing detailed instructions on how to reproduce the bug. + * A **[Regression Potential]** section with a clear assessment on how regressions are most likely to manifest as a result of the pull request that aims to fix the bug in the target stable release. + +3. **Stable Release Managers** will review and discuss the PR. Once _consensus_ surrounding the rationale has been reached and the technical review has successfully concluded, the pull request will be merged in the respective point-release target branch (e.g. `release/v0.43.x`) and the PR included in the point-release's respective milestone (e.g. `v0.43.5`). 
+ +#### Stable Release Exception - Bug template + +```md +#### Impact + +Brief explanation of the effects of the bug on users and a justification for backporting the fix to the stable release. + +#### Test Case + +Detailed instructions on how to reproduce the bug on Stargate's most recently published point-release. + +#### Regression Potential + +Explanation on how regressions might manifest - even if it's unlikely. +It is assumed that stable release fixes are well-tested and they come with a low risk of regressions. +It's crucial to make the effort of thinking about what could happen in case a regression emerges. +``` + +### Stable Release Managers + +The **Stable Release Managers** evaluate and approve or reject updates and backports to Cosmos SDK Stable Release series, +according to the [stable release policy](#stable-release-policy) and [release procedure](#major-release-procedure). +Decisions are made by consensus. + +Their responsibilities include: + +* Driving the Stable Release Exception process. +* Approving/rejecting proposed changes to a stable release series. +* Executing the release process of stable point-releases in compliance with the [Point Release Procedure](CONTRIBUTING.md). + +Currently residing Stable Release Managers: + +* @tac0turtle - Marko Baricevic +* @julienrbrt - Julien Robert + +## Cosmos SDK Modules + +The Cosmos SDK repository is a mono-repo where its Go modules have a different release process and cadence than the Cosmos SDK itself. +There are two types of modules: + +1. Modules that import the Cosmos SDK and depend on a specific version of it. + * Modules to be imported in an app (e.g `x/` modules). + * Modules that are not imported into an app and are a standalone module (e.g. `cosmovisor`). +2. Modules that do not depend on the Cosmos SDK. + +The same changelog procedure applies to all modules in the Cosmos SDK repository, and must be up-to-date with the latest changes before tagging a module version. 
+Note: The Cosmos SDK team is in an active process of limiting Go modules that depend on the Cosmos SDK. + +### Modules that depend on the Cosmos SDK + +The Cosmos SDK team should strive to release modules that depend on the Cosmos SDK at the same time as, or soon after, a major version of the Cosmos SDK itself. +Those modules can be considered as part of the Cosmos SDK, but features and improvements are released at a different cadence. + +* When a module is supposed to be used in an app (e.g `x/` modules), due to the dependency on the SDK, tagging a new version of a module must be done from a Cosmos SDK release branch. A compatibility matrix must be provided in the `README.md` of that module with the corresponding versions. +* Modules that import the SDK but do not need to be imported in an app (e.g. `cosmovisor`) must be released from the `main` branch and follow the process defined below. + +### Modules that do not depend on the Cosmos SDK + +Modules that do not depend on the Cosmos SDK can be released at any time from the `main` branch of the Cosmos SDK repository. + +#### Branches For Go Modules + +Branches that Go modules are released from: + +* Store v1 is released from `release/v0.50.x` branch. diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 0000000..d2086a8 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,270 @@ +# Roadmap 2023 + +Welcome to the Cosmos SDK's team roadmap. + +> This document is meant to help the team get feedback on the proposed work and for others to follow where we stand in our process. This will be a living document updated on a regular basis. If you'd like to participate in any workscope or would like to suggest another feature please reach out to [Marko](marko@binary.builders) or [Sam](sam@binary.builders) and we will schedule a call to discuss the feature request. + + +## Q1 + +### Storage + +* [x] [Produce a spec for the new store design](https://github.com/cosmos/cosmos-sdk/issues/12986) + * Research a new design for store. 
This could entail writing some POC's in order to identify design patterns +* [x] Store as its own go module + * Store module should be its own go.mod without a dependency on the Cosmos SDK +* [ ] [Begin implementation of store v2](https://github.com/cosmos/cosmos-sdk/pull/15028) + * Identify the migration path from store v1 -> store v2 +* [ ] Parallel execution of state + * RFC/ADR is merged into the main on the sdk +* [ ] Optimistic execution + * RFC/ADR is merged into main on the sdk + + +### Client UX + +* [x] Release v1 of query support (auto-cli) + * A version of query support has been merged, documentation is missing +* [ ] Dynamic metadata support + * Dynamic support allows the Cosmos SDK to release a cmd line tool that could work with any chain. + * Add metadata support to latest version of Cosmos SDK and, if possible, backport to older versions +* [x] Multi-chain command **(Done)** + * Release a cmd line tool that can be pointed a grpc endpoint which then can produce cmd lines to interact with the chain +* [x] Auto-cli tx support + * Tx support for auto-cli/hubl + * This would fully remove the need for application developers to write cli commands for their modules +* [ ] [Consensus Key Rotation](https://github.com/cosmos/cosmos-sdk/issues/5231) + + +### Dev UX + +* [x] [Release collections v0.1](https://github.com/cosmos/cosmos-sdk/issues/14300) + * Collections is a new abstraction layer similar to the ORM. In the ADR phase it received support from many in the ecosystem. + * V1 release should allow modules to be migrated to collections. 
+ * Migrate 3 modules to use collections api + * Migrating 3 modules to use collections would help in show users how to migrate users +* [ ] [Release ORM v1](https://github.com/cosmos/cosmos-sdk/issues/11088) +* [x] [Sign mode textual](https://github.com/cosmos/cosmos-sdk/issues/11970) + * Sign mode textual has been under construction for 2 quarters now, this quarter the goal is to move towards v1 and potentially line up a audit before final release. +* [x] Core API + * [Merge ADR for Core API](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-063-core-module-api.md) + * Migrate three modules to use core api +* [x] Module Dependency + * Give three modules their own go.mods +* [ ] [Metamask signing directly into the sdk](https://github.com/cosmos/cosmos-sdk/discussions/13892) +* [ ] [ADR-033 (internal message routing)](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-033-protobuf-inter-module-comm.md) + * Merge internal message router + * Add docs on how to use the router +* [x] [ADR-54 (dependency management)](https://github.com/cosmos/cosmos-sdk/pull/11802) + * Come to consensus on module dependency graph for the future + * Begin socializing conventions +* [ ] [Remove global bech32](https://github.com/cosmos/cosmos-sdk/issues/13140) +* [x] [Auth module](https://github.com/cosmos/cosmos-sdk/issues/14900) + * Produce a spec/ADR on a proposed new auth module. +* [x] [Implement Amino Json encoder](https://github.com/cosmos/cosmos-sdk/issues/10993) + +### Testing + +* [x] [integration testing framework](https://github.com/cosmos/cosmos-sdk/issues/14145) + * design and merge a integration testing framework. + * The goals of the framework would that a module only needs to depend on modules that it depends on outside of testing, not all modules in the sdk like today. 
+ + +### ABCI 2.0 + +Issue: https://github.com/cosmos/cosmos-sdk/issues/12272 + +* [x] [ADR ABCI 2.0](https://github.com/cosmos/cosmos-sdk/issues/14674) + * Write ADR for integration of vote extensions & finalize block + +**Blocked**: + +> once cometBFT has a release candidate of ABCI 2.0 (cmt 0.38) + +* Integrate ABCI 2.0 + +### Security + +* [ ] [Circuit breaker](https://github.com/cosmos/cosmos-sdk/issues/14226) + * Implement the circuit breaker module and prepare releases for Cosmos SDK versions 0.45, 0.46 and 0.47 + + +### IAVL + +* [x] [ADR 001](https://github.com/cosmos/iavl/pull/608) + * Implementation has been completed, waiting on reviewers +* [x] [Prepare the migration path](https://github.com/cosmos/iavl/issues/675) + * Migration path has not been finalized +* [ ] Test on mainnets + * If possible we would like to test the new IAVL design on mainnets to observer behavior changes. + +Issue: https://github.com/cosmos/iavl/issues/548 + +## Q2 + +### Storage + +* [ ] [Storage v2](https://github.com/cosmos/cosmos-sdk/issues/12986) + * Objective: + * Goal is to get 60% of the way through the rewrite of storage + * Begin testing the rewrite on mainnets if possible + * External Audit + * Progress: + * On pause till ABCI 2.0 ships +* [ ] [Optimistic Execution](https://github.com/cosmos/cosmos-sdk/issues/15365) + * Objective: + * Users should be able to pick between delayed execution and optimistic + * RFC/ADR is merged + * Implementation started. 
+ * Progress: + * On pause till ABCI 2.0 is merged + +### Client UX + +* [ ] Hubl/AutoCLI + * Objective: + * Allow users to sign and submit transactions using hubl + * Add module support for autocli + * Deprecate/remove legacy cli (optional) + * Progress: + * Signing support is being refactored and is near completion + * Adapting modules to use autocli instead of manually written cli +* [ ] [Consensus Key Rotation](https://github.com/cosmos/cosmos-sdk/issues/5231) + * Objective: + * Allow users to rotate consensus keys for their validators + * Progress + * Merge pull request and updated ADR into main +* [ ] [Operator key rotation](https://github.com/cosmos/cosmos-sdk/issues/3863) + * Objective: + * Allow users to rotate operator keys for their validators + * Progress: + * secondary val index was added to support operator key rotation + * Pr is open and is being worked on + +### Dev UX + +* Toolkit/SDK ADR. + * Objective: + * Produce a RFC/ADR on how to make core composable + * Merge RFC/ADR into main + * Progress: + * on pause until abci 2.0 integration is completed +* Adopt core api fully in modules + * Objective: + * Remove the Cosmos SDK and Comet as a dependency from all modules + * Release v1 of modules that have their dependency graph cleaned up + * Spin out 4 more modules into their own go.mods + * Progress: + * Core api has been integrated into all modules + * KvStoreService is being added to all modules +* [ ] [Remove global bech32](https://github.com/cosmos/cosmos-sdk/issues/13140) + * Objective: + * Deprecate global bech32 setting + * Progress: + * All modules except bank use the `address.Codec` +* [ ] Make sdk.Msg only be `proto.message` + * Objectives: + * Reduce sdk.Msg to only be proto.message + * Reduce boilerplate in `msgs.go` + * Progress: + * [x] [Make ValidateBasic Optional](https://github.com/cosmos/cosmos-sdk/issues/15648) + * [ ] [Make GetSigners be optional](https://github.com/cosmos/cosmos-sdk/issues/15677) + * [ ] Remove GetSignBytes for 
legacy amino encoding +* [ ] [Collections](https://github.com/cosmos/cosmos-sdk/issues/14300) + * Objectives + * Migrate all modules + * Add query support + * Add schema support + * Progress: + * We have migrated three modules + +### ABCI 2.0 + +* [ ] [ABCI 2.0](https://github.com/cosmos/cosmos-sdk/issues/12272) + * Objectives: + * Integrate comet 0.38 + * QA + * Progress: + * Integration has started + + +### Testing + +* [ ] [Integration framework](https://github.com/cosmos/cosmos-sdk/issues/14145) + * Objectives: + * Migrate all modules + * Progress: + * We have migrated 2-4 modules currently + +### Modules + +* [ ] [Invariant Checking](https://github.com/cosmos/cosmos-sdk/issues/15706) + * Objective: + * Design a new system for checking invairants + * Implement changes + * Audit current invariants + * Progress: + * NA +* [ ] [Accounts](https://github.com/cosmos/cosmos-sdk/issues/14900) + * Objective: + * Allow users to use account abstractions + * Implementation is completed + * External Audit is scheduled (if needed) + * Progress: + * RFC/ADR is in review + +### Research + +* [ ] Commitment Structure + * Objective: + * Identify different commitment structures that could be used in the sdk and how they would be plugged (highlevel) + * Progress: + * Conversations in slack have started +* [ ] Cross lang + * Objective: + * Answer, what is needed to support many languages + * Answer, what sort of encoding of passing between the cgo/ffi boundary + * Progress: + * working group is meeting bi weekly + * ADR/RFC is in review + +## Q3 + +### Storage + +* Audit & release of storage refactor +* Identify further optimizations for storage + * Goal is to identify the next bottlenecks in storage or the state machine + +### Dev UX + +* Complete Toolkit/SDK implementation refactor + * Goal is to release the new version of the sdk allowing for further composability +* Implement fee market abstractions + * Goal is to release an alpha version of fee market abstractions + +### 
Modules + +* Governance + * Make gov and groups composable with each other, not duplicate +* Staking + * Research a new staking design + * Begin Implementation + + +### Research + +* Nonce Lanes + * Goal is to produce a spec and/or viability of using lanes for nonces instead of a single sequence number. + +## Q4 + +### Research + +* Stateless clients + * research how stateless clients could evolve in cosmos + + + + +This document will be updated at the end of the quarter on what was achieved and what was not. Shortly before the quarter concludes a new section will be added for the next quarter. We are working on updating the complete one year roadmap and will be posting it here as well. diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..2db2346 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,79 @@ +# Coordinated Vulnerability Disclosure Policy + +The Cosmos ecosystem believes that strong security is a blend of highly +technical security researchers who care about security and the forward +progression of the ecosystem and the attentiveness and openness of Cosmos core +contributors to help continually secure our operations. + +> **IMPORTANT**: *DO NOT* open public issues on this repository for security +> vulnerabilities. + +## Scope + +| Scope | +|-----------------------| +| last release (tagged) | +| main branch | + +The latest **release tag** of this repository is supported for security updates +as well as the **main** branch. Security vulnerabilities should be reported if +the vulnerability can be reproduced on either one of those. + +## Reporting a Vulnerability + +| Reporting methods | +|---------------------------------------------------------------| +| [GitHub Private Vulnerability Reporting][gh-private-advisory] | +| [HackerOne bug bounty program][h1] | + +All security vulnerabilities can be reported under GitHub's [Private +vulnerability reporting][gh-private-advisory] system. This will open a private +issue for the developers. 
Try to fill in as much of the questions as possible. +If you are not familiar with the CVSS system for assessing vulnerabilities, just +use the Low/High/Critical severity ratings. A partially filled in report for a +critical vulnerability is still better than no report at all. + +Vulnerabilities associated with the **Go, Rust or Protobuf code** of the +repository may be eligible for a [bug bounty][h1]. Please see the bug bounty +page for more details on submissions and rewards. If you think the vulnerability +is eligible for a payout, **report on HackerOne first**. + +Vulnerabilities in services and their source codes (JavaScript, web page, Google +Workspace) are not in scope for the bug bounty program, but they are welcome to +be reported in GitHub. + +### Guidelines + +We require that all researchers: + +* Abide by this policy to disclose vulnerabilities, and avoid posting + vulnerability information in public places, including GitHub, Discord, + Telegram, and Twitter. +* Make every effort to avoid privacy violations, degradation of user experience, + disruption to production systems (including but not limited to the Cosmos + Hub), and destruction of data. +* Keep any information about vulnerabilities that you’ve discovered confidential + between yourself and the Cosmos engineering team until the issue has been + resolved and disclosed. +* Avoid posting personally identifiable information, privately or publicly. + +If you follow these guidelines when reporting an issue to us, we commit to: + +* Not pursue or support any legal action related to your research on this + vulnerability +* Work with you to understand, resolve and ultimately disclose the issue in a + timely fashion + +### More information + +* See [TIMELINE.md] for an example timeline of a disclosure. +* See [DISCLOSURE.md] to see more into the inner workings of the disclosure + process. +* See [EXAMPLES.md] for some of the examples that we are interested in for the + bug bounty program. 
+ +[gh-private-advisory]: /../../security/advisories/new +[h1]: https://hackerone.com/cosmos +[TIMELINE.md]: https://github.com/cosmos/security/blob/main/TIMELINE.md +[DISCLOSURE.md]: https://github.com/cosmos/security/blob/main/DISCLOSURE.md +[EXAMPLES.md]: https://github.com/cosmos/security/blob/main/EXAMPLES.md diff --git a/UPGRADE_GUIDE.md b/UPGRADE_GUIDE.md new file mode 100644 index 0000000..9f2b4de --- /dev/null +++ b/UPGRADE_GUIDE.md @@ -0,0 +1,503 @@ +# Upgrade Guide + +This document provides a full guide for upgrading a Cosmos SDK chain from `v0.50.x` to `v0.53.x`. + +This guide includes one **required** change and three **optional** features. + +After completing this guide, applications will have: + +- The `x/protocolpool` module +- The `x/epochs` module +- Unordered Transaction support + +## Table of Contents + +- [App Wiring Changes (REQUIRED)](#app-wiring-changes-required) +- [Adding ProtocolPool Module (OPTIONAL)](#adding-protocolpool-module-optional) + - [ProtocolPool Manual Wiring](#protocolpool-manual-wiring) + - [ProtocolPool DI Wiring](#protocolpool-di-wiring) +- [Adding Epochs Module (OPTIONAL)](#adding-epochs-module-optional) + - [Epochs Manual Wiring](#epochs-manual-wiring) + - [Epochs DI Wiring](#epochs-di-wiring) +- [Enable Unordered Transactions (OPTIONAL)](#enable-unordered-transactions-optional) +- [Upgrade Handler](#upgrade-handler) + +## App Wiring Changes **REQUIRED** + +The `x/auth` module now contains a `PreBlocker` that _must_ be set in the module manager's `SetOrderPreBlockers` method. 
+ +```go +app.ModuleManager.SetOrderPreBlockers( + upgradetypes.ModuleName, + authtypes.ModuleName, // NEW +) +``` + +## Adding ProtocolPool Module **OPTIONAL** + +:::warning + +Using an external community pool such as `x/protocolpool` will cause the following `x/distribution` handlers to return an error: + +**QueryService** + +- `CommunityPool` + +**MsgService** + +- `CommunityPoolSpend` +- `FundCommunityPool` + +If your services depend on this functionality from `x/distribution`, please update them to use either `x/protocolpool` or your custom external community pool alternatives. + +::: + +### Manual Wiring + +Import the following: + +```go +import ( + // ... + "github.com/cosmos/cosmos-sdk/x/protocolpool" + protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper" + protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types" +) +``` + +Set the module account permissions. + +```go +maccPerms = map[string][]string{ + // ... + protocolpooltypes.ModuleName: nil, + protocolpooltypes.ProtocolPoolEscrowAccount: nil, +} +``` + +Add the protocol pool keeper to your application struct. + +```go +ProtocolPoolKeeper protocolpoolkeeper.Keeper +``` + +Add the store key: + +```go +keys := storetypes.NewKVStoreKeys( + // ... + protocolpooltypes.StoreKey, +) +``` + +Instantiate the keeper. + +Make sure to do this before the distribution module instantiation, as you will pass the keeper there next. 
+ +```go +app.ProtocolPoolKeeper = protocolpoolkeeper.NewKeeper( + appCodec, + runtime.NewKVStoreService(keys[protocolpooltypes.StoreKey]), + app.AccountKeeper, + app.BankKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), +) +``` + +Pass the protocolpool keeper to the distribution keeper: + +```go +app.DistrKeeper = distrkeeper.NewKeeper( + appCodec, + runtime.NewKVStoreService(keys[distrtypes.StoreKey]), + app.AccountKeeper, + app.BankKeeper, + app.StakingKeeper, + authtypes.FeeCollectorName, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper), // NEW +) +``` + +Add the protocolpool module to the module manager: + +```go +app.ModuleManager = module.NewManager( + // ... + protocolpool.NewAppModule(appCodec, app.ProtocolPoolKeeper, app.AccountKeeper, app.BankKeeper), +) +``` + +Add an entry for SetOrderBeginBlockers, SetOrderEndBlockers, SetOrderInitGenesis, and SetOrderExportGenesis. + +```go +app.ModuleManager.SetOrderBeginBlockers( + // must come AFTER distribution. + distrtypes.ModuleName, + protocolpooltypes.ModuleName, +) +``` + +```go +app.ModuleManager.SetOrderEndBlockers( + // order does not matter. + protocolpooltypes.ModuleName, +) +``` + +```go +app.ModuleManager.SetOrderInitGenesis( + // order does not matter. + protocolpooltypes.ModuleName, +) +``` + +```go +app.ModuleManager.SetOrderExportGenesis( + protocolpooltypes.ModuleName, // must be exported before bank. + banktypes.ModuleName, +) +``` + +### DI Wiring + +Note: _as long as an external community pool keeper (here, `x/protocolpool`) is wired in DI configs, `x/distribution` will automatically use it for its external pool._ + +First, set up the keeper for the application. 
+ +Import the protocolpool keeper: + +```go +protocolpoolkeeper "github.com/cosmos/cosmos-sdk/x/protocolpool/keeper" +``` + +Add the keeper to your application struct: + +```go +ProtocolPoolKeeper protocolpoolkeeper.Keeper +``` + +Add the keeper to the depinject system: + +```go +depinject.Inject( + appConfig, + &appBuilder, + &app.appCodec, + &app.legacyAmino, + &app.txConfig, + &app.interfaceRegistry, + // ... other modules + &app.ProtocolPoolKeeper, // NEW MODULE! +) +``` + +Next, set up configuration for the module. + +Import the following: + +```go +import ( + protocolpoolmodulev1 "cosmossdk.io/api/cosmos/protocolpool/module/v1" + + _ "github.com/cosmos/cosmos-sdk/x/protocolpool" // import for side-effects + protocolpooltypes "github.com/cosmos/cosmos-sdk/x/protocolpool/types" +) +``` + +The protocolpool module has module accounts that handle funds. Add them to the module account permission configuration: + +```go +moduleAccPerms = []*authmodulev1.ModuleAccountPermission{ + // ... + {Account: protocolpooltypes.ModuleName}, + {Account: protocolpooltypes.ProtocolPoolEscrowAccount}, +} +``` + +Next, add an entry for BeginBlockers, EndBlockers, InitGenesis, and ExportGenesis. + +```go +BeginBlockers: []string{ + // ... + // must be AFTER distribution. + distrtypes.ModuleName, + protocolpooltypes.ModuleName, +}, +``` + +```go +EndBlockers: []string{ + // ... + // order for protocolpool does not matter. + protocolpooltypes.ModuleName, +}, +``` + +```go +InitGenesis: []string{ + // ... must be AFTER distribution. + distrtypes.ModuleName, + protocolpooltypes.ModuleName, +}, +``` + +```go +ExportGenesis: []string{ + // ... + // Must be exported before x/bank. + protocolpooltypes.ModuleName, + banktypes.ModuleName, +}, +``` + +Lastly, add an entry for protocolpool in the ModuleConfig. 
+ +```go +{ + Name: protocolpooltypes.ModuleName, + Config: appconfig.WrapAny(&protocolpoolmodulev1.Module{}), +}, +``` + +## Adding Epochs Module **OPTIONAL** + +### Manual Wiring + +Import the following: + +```go +import ( + // ... + "github.com/cosmos/cosmos-sdk/x/epochs" + epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper" + epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types" +) +``` + +Add the epochs keeper to your application struct: + +```go +EpochsKeeper epochskeeper.Keeper +``` + +Add the store key: + +```go +keys := storetypes.NewKVStoreKeys( + // ... + epochstypes.StoreKey, +) +``` + +Instantiate the keeper: + +```go +app.EpochsKeeper = epochskeeper.NewKeeper( + runtime.NewKVStoreService(keys[epochstypes.StoreKey]), + appCodec, +) +``` + +Set up hooks for the epochs keeper: + +To learn how to write hooks for the epoch keeper, see the [x/epoch README](https://github.com/cosmos/cosmos-sdk/blob/main/x/epochs/README.md) + +```go +app.EpochsKeeper.SetHooks( + epochstypes.NewMultiEpochHooks( + // insert epoch hooks receivers here + app.SomeOtherModule + ), +) +``` + +Add the epochs module to the module manager: + +```go +app.ModuleManager = module.NewManager( + // ... + epochs.NewAppModule(appCodec, app.EpochsKeeper), +) +``` + +Add entries for SetOrderBeginBlockers and SetOrderInitGenesis: + +```go +app.ModuleManager.SetOrderBeginBlockers( + // ... + epochstypes.ModuleName, +) +``` + +```go +app.ModuleManager.SetOrderInitGenesis( + // ... + epochstypes.ModuleName, +) +``` + +### DI Wiring + +First, set up the keeper for the application. + +Import the epochs keeper: + +```go +epochskeeper "github.com/cosmos/cosmos-sdk/x/epochs/keeper" +``` + +Add the keeper to your application struct: + +```go +EpochsKeeper epochskeeper.Keeper +``` + +Add the keeper to the depinject system: + +```go +depinject.Inject( + appConfig, + &appBuilder, + &app.appCodec, + &app.legacyAmino, + &app.txConfig, + &app.interfaceRegistry, + // ... 
other modules + &app.EpochsKeeper, // NEW MODULE! +) +``` + +Next, set up configuration for the module. + +Import the following: + +```go +import ( + epochsmodulev1 "cosmossdk.io/api/cosmos/epochs/module/v1" + + _ "github.com/cosmos/cosmos-sdk/x/epochs" // import for side-effects + epochstypes "github.com/cosmos/cosmos-sdk/x/epochs/types" +) +``` + +Add an entry for BeginBlockers and InitGenesis: + +```go +BeginBlockers: []string{ + // ... + epochstypes.ModuleName, +}, +``` + +```go +InitGenesis: []string{ + // ... + epochstypes.ModuleName, +}, +``` + +Lastly, add an entry for epochs in the ModuleConfig: + +```go +{ + Name: epochstypes.ModuleName, + Config: appconfig.WrapAny(&epochsmodulev1.Module{}), +}, +``` + +## Enable Unordered Transactions **OPTIONAL** + +To enable unordered transaction support on an application, the `x/auth` keeper must be supplied with the `WithUnorderedTransactions` option. + +Note that unordered transactions require sequence values to be zero, and will **FAIL** if a non-zero sequence value is set. +Please ensure no sequence value is set when submitting an unordered transaction. +Services that rely on prior assumptions about sequence values should be updated to handle unordered transactions. +Services should be aware that when the transaction is unordered, the transaction sequence will always be zero. + +```go + app.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, + runtime.NewKVStoreService(keys[authtypes.StoreKey]), + authtypes.ProtoBaseAccount, + maccPerms, + authcodec.NewBech32Codec(sdk.Bech32MainPrefix), + sdk.Bech32MainPrefix, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + authkeeper.WithUnorderedTransactions(true), // new option! + ) +``` + +If using dependency injection, update the auth module config. 
+ +```go + { + Name: authtypes.ModuleName, + Config: appconfig.WrapAny(&authmodulev1.Module{ + Bech32Prefix: "cosmos", + ModuleAccountPermissions: moduleAccPerms, + EnableUnorderedTransactions: true, // remove this line if you do not want unordered transactions. + }), + }, +``` + +By default, unordered transactions use a transaction timeout duration of 10 minutes and a default gas charge of 2240 gas units. +To modify these default values, pass in the corresponding options to the new `SigVerifyOptions` field in `x/auth's` `ante.HandlerOptions`. + +```go +options := ante.HandlerOptions{ + SigVerifyOptions: []ante.SigVerificationDecoratorOption{ + // change below as needed. + ante.WithUnorderedTxGasCost(ante.DefaultUnorderedTxGasCost), + ante.WithMaxUnorderedTxTimeoutDuration(ante.DefaultMaxTimoutDuration), + }, +} +``` + +```go +anteDecorators := []sdk.AnteDecorator{ + // ... other decorators ... + ante.NewSigVerificationDecorator(options.AccountKeeper, options.SignModeHandler, options.SigVerifyOptions...), // supply new options +} +``` + +## Upgrade Handler + +The upgrade handler only requires adding the store upgrades for the modules added above. +If your application is not adding `x/protocolpool` or `x/epochs`, you do not need to add the store upgrade. + +```go +// UpgradeName defines the on-chain upgrade name for the sample SimApp upgrade +// from v050 to v053. +// +// NOTE: This upgrade defines a reference implementation of what an upgrade +// could look like when an application is migrating from Cosmos SDK version +// v0.50.x to v0.53.x. 
+const UpgradeName = "v050-to-v053" + +func (app SimApp) RegisterUpgradeHandlers() { + app.UpgradeKeeper.SetUpgradeHandler( + UpgradeName, + func(ctx context.Context, _ upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) + }, + ) + + upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() + if err != nil { + panic(err) + } + + if upgradeInfo.Name == UpgradeName && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { + storeUpgrades := storetypes.StoreUpgrades{ + Added: []string{ + epochstypes.ModuleName, // if not adding x/epochs to your chain, remove this line. + protocolpooltypes.ModuleName, // if not adding x/protocolpool to your chain, remove this line. + }, + } + + // configure store loader that checks if version == upgradeHeight and applies store upgrades + app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades)) + } +} +``` \ No newline at end of file diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 0000000..8b79372 --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,227 @@ +# Upgrade Reference + +This document provides a quick reference for the upgrades from `v0.50.x` to `v0.53.x` of Cosmos SDK. + +Note, always read the **App Wiring Changes** section for more information on application wiring updates. + +🚨Upgrading to v0.53.x will require a **coordinated** chain upgrade.🚨 + +### TLDR; + +Unordered transactions, `x/protocolpool`, and `x/epoch` are the major new features added in v0.53.x. + +We also added the ability to add a `CheckTx` handler and enabled ed25519 signature verification. + +For a full list of changes, see the [Changelog](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/CHANGELOG.md). + +### Unordered Transactions + +The Cosmos SDK now supports unordered transactions. _This is an opt-in feature_. 
+ +Clients that use this feature may now submit their transactions in a fire-and-forget manner to chains that enabled unordered transactions. + +To submit an unordered transaction, clients must set the `unordered` flag to +`true` and ensure a reasonable `timeout_timestamp` is set. The `timeout_timestamp` is +used as a TTL for the transaction and provides replay protection. Each transaction's `timeout_timestamp` must be +unique to the account; however, the difference may be as small as a nanosecond. See [ADR-070](https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-070-unordered-transactions.md) for more details. + +Note that unordered transactions require sequence values to be zero, and will **FAIL** if a non-zero sequence value is set. +Please ensure no sequence value is set when submitting an unordered transaction. +Services that rely on prior assumptions about sequence values should be updated to handle unordered transactions. +Services should be aware that when the transaction is unordered, the transaction sequence will always be zero. + +#### Enabling Unordered Transactions + +To enable unordered transactions, supply the `WithUnorderedTransactions` option to the `x/auth` keeper: + +```go + app.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, + runtime.NewKVStoreService(keys[authtypes.StoreKey]), + authtypes.ProtoBaseAccount, + maccPerms, + authcodec.NewBech32Codec(sdk.Bech32MainPrefix), + sdk.Bech32MainPrefix, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + authkeeper.WithUnorderedTransactions(true), // new option! + ) +``` + +If using dependency injection, update the auth module config. + +```go + { + Name: authtypes.ModuleName, + Config: appconfig.WrapAny(&authmodulev1.Module{ + Bech32Prefix: "cosmos", + ModuleAccountPermissions: moduleAccPerms, + EnableUnorderedTransactions: true, // remove this line if you do not want unordered transactions. 
+ }), + }, +``` + +By default, unordered transactions use a transaction timeout duration of 10 minutes and a default gas charge of 2240 gas units. +To modify these default values, pass in the corresponding options to the new `SigVerifyOptions` field in `x/auth's` `ante.HandlerOptions`. + +```go +options := ante.HandlerOptions{ + SigVerifyOptions: []ante.SigVerificationDecoratorOption{ + // change below as needed. + ante.WithUnorderedTxGasCost(ante.DefaultUnorderedTxGasCost), + ante.WithMaxUnorderedTxTimeoutDuration(ante.DefaultMaxTimoutDuration), + }, +} +``` + +```go +anteDecorators := []sdk.AnteDecorator{ + // ... other decorators ... + ante.NewSigVerificationDecorator(options.AccountKeeper, options.SignModeHandler, options.SigVerifyOptions...), // supply new options +} +``` + +### App Wiring Changes + +In this section, we describe the required app wiring changes to run a v0.53.x Cosmos SDK application. + +**These changes are directly applicable to your application wiring.** + +The `x/auth` module now contains a `PreBlocker` that _must_ be set in the module manager's `SetOrderPreBlockers` method. + +```go +app.ModuleManager.SetOrderPreBlockers( + upgradetypes.ModuleName, + authtypes.ModuleName, // NEW +) +``` + +That's it. + +### New Modules + +Below are some **optional** new modules you can include in your chain. +To see a full example of wiring these modules, please check out the [SimApp](https://github.com/cosmos/cosmos-sdk/blob/release/v0.53.x/simapp/app.go). + +#### Epochs + +⚠️Adding this module requires a `StoreUpgrade`⚠️ + +The new, supplemental `x/epochs` module provides Cosmos SDK modules functionality to register and execute custom logic at fixed time-intervals. 
+ +Required wiring: +- Keeper Instantiation +- StoreKey addition +- Hooks Registration +- App Module Registration +- entry in SetOrderBeginBlockers +- entry in SetGenesisModuleOrder +- entry in SetExportModuleOrder + +#### ProtocolPool + +:::warning + +Using `protocolpool` will cause the following `x/distribution` handlers to return an error: + + +**QueryService** + +- `CommunityPool` + +**MsgService** + +- `CommunityPoolSpend` +- `FundCommunityPool` + +If you have services that rely on this functionality from `x/distribution`, please update them to use the `x/protocolpool` equivalents. + +::: + +⚠️Adding this module requires a `StoreUpgrade`⚠️ + +The new, supplemental `x/protocolpool` module provides extended functionality for managing and distributing block reward revenue. + +Required wiring: +- Module Account Permissions + - protocolpooltypes.ModuleName (nil) + - protocolpooltypes.ProtocolPoolEscrowAccount (nil) +- Keeper Instantiation +- StoreKey addition +- Passing the keeper to the Distribution Keeper + - `distrkeeper.WithExternalCommunityPool(app.ProtocolPoolKeeper)` +- App Module Registration +- entry in SetOrderBeginBlockers +- entry in SetOrderEndBlockers +- entry in SetGenesisModuleOrder +- entry in SetExportModuleOrder **before `x/bank`** + +## Custom Minting Function in `x/mint` + +This release introduces the ability to configure a custom mint function in `x/mint`. The minting logic is now abstracted as a `MintFn` with a default implementation that can be overridden. + +### What’s New + +- **Configurable Mint Function:** + A new `MintFn` abstraction is introduced. By default, the module uses `DefaultMintFn`, but you can supply your own implementation. + +- **Deprecated InflationCalculationFn Parameter:** + The `InflationCalculationFn` argument previously provided to `mint.NewAppModule()` is now ignored and must be `nil`. 
To customize the default minter’s inflation behavior, wrap your custom function with `mintkeeper.DefaultMintFn` and pass it via the `WithMintFn` option: + +```go + mintkeeper.WithMintFn(mintkeeper.DefaultMintFn(customInflationFn)) +``` + +### How to Upgrade + +1. **Using the Default Minting Function** + + No action is needed if you’re happy with the default behavior. Make sure your application wiring initializes the MintKeeper like this: + +```go + mintKeeper := mintkeeper.NewKeeper( + appCodec, + storeService, + stakingKeeper, + accountKeeper, + bankKeeper, + authtypes.FeeCollectorName, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) +``` + +2. **Using a Custom Minting Function** + + To use a custom minting function, define it as follows and pass it to your mintKeeper when constructing it: + +```go +func myCustomMintFunc(ctx sdk.Context, k *mintkeeper.Keeper) { + // do minting... +} + +// ... + mintKeeper := mintkeeper.NewKeeper( + appCodec, + storeService, + stakingKeeper, + accountKeeper, + bankKeeper, + authtypes.FeeCollectorName, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + mintkeeper.WithMintFn(myCustomMintFunc), // Use custom minting function + ) +``` + +### Misc Changes + +#### Testnet's init-files Command + +Some changes were made to `testnet`'s `init-files` command to support our new testing framework, `Systemtest`. + +##### Flag Changes + +- The flag for validator count was changed from `--v` to `--validator-count` (shorthand: `-v`). + +##### Flag Additions +- `--staking-denom` allows changing the default stake denom, `stake`. +- `--commit-timeout` enables changing the commit timeout of the chain. +- `--single-host` enables running a multi-node network on a single host. This bumps each subsequent node's network addresses by 1. For example, node1's gRPC address will be 9090, node2's 9091, etc... 
\ No newline at end of file diff --git a/baseapp/abci.go b/baseapp/abci.go new file mode 100644 index 0000000..27b21ed --- /dev/null +++ b/baseapp/abci.go @@ -0,0 +1,1392 @@ +package baseapp + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "github.com/cockroachdb/errors" + abci "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/gogoproto/proto" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + + coreheader "cosmossdk.io/core/header" + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/store/rootmulti" + snapshottypes "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// Supported ABCI Query prefixes and paths +const ( + QueryPathApp = "app" + QueryPathCustom = "custom" + QueryPathP2P = "p2p" + QueryPathStore = "store" + + QueryPathBroadcastTx = "/cosmos.tx.v1beta1.Service/BroadcastTx" +) + +func (app *BaseApp) InitChain(req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { + if req.ChainId != app.chainID { + return nil, fmt.Errorf("invalid chain-id on InitChain; expected: %s, got: %s", app.chainID, req.ChainId) + } + + // On a new chain, we consider the init chain block height as 0, even though + // req.InitialHeight is 1 by default. + initHeader := cmtproto.Header{ChainID: req.ChainId, Time: req.Time} + app.logger.Info("InitChain", "initialHeight", req.InitialHeight, "chainID", req.ChainId) + + // Set the initial height, which will be used to determine if we are proposing + // or processing the first block or not. 
+ app.initialHeight = req.InitialHeight + if app.initialHeight == 0 { // If initial height is 0, set it to 1 + app.initialHeight = 1 + } + + // if req.InitialHeight is > 1, then we set the initial version on all stores + if req.InitialHeight > 1 { + initHeader.Height = req.InitialHeight + if err := app.cms.SetInitialVersion(req.InitialHeight); err != nil { + return nil, err + } + } + + // initialize states with a correct header + app.setState(execModeFinalize, initHeader) + app.setState(execModeCheck, initHeader) + + // Store the consensus params in the BaseApp's param store. Note, this must be + // done after the finalizeBlockState and context have been set as it's persisted + // to state. + if req.ConsensusParams != nil { + err := app.StoreConsensusParams(app.finalizeBlockState.Context(), *req.ConsensusParams) + if err != nil { + return nil, err + } + } + + defer func() { + // InitChain represents the state of the application BEFORE the first block, + // i.e. the genesis block. This means that when processing the app's InitChain + // handler, the block height is zero by default. However, after Commit is called + // the height needs to reflect the true block height. + initHeader.Height = req.InitialHeight + app.checkState.SetContext(app.checkState.Context().WithBlockHeader(initHeader). + WithHeaderInfo(coreheader.Info{ + ChainID: req.ChainId, + Height: req.InitialHeight, + Time: req.Time, + })) + app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockHeader(initHeader). 
+ WithHeaderInfo(coreheader.Info{ + ChainID: req.ChainId, + Height: req.InitialHeight, + Time: req.Time, + })) + }() + + if app.initChainer == nil { + return &abci.ResponseInitChain{}, nil + } + + // add block gas meter for any genesis transactions (allow infinite gas) + app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(storetypes.NewInfiniteGasMeter())) + + res, err := app.initChainer(app.finalizeBlockState.Context(), req) + if err != nil { + return nil, err + } + + if len(req.Validators) > 0 { + if len(req.Validators) != len(res.Validators) { + return nil, fmt.Errorf( + "len(RequestInitChain.Validators) != len(GenesisValidators) (%d != %d)", + len(req.Validators), len(res.Validators), + ) + } + + sort.Sort(abci.ValidatorUpdates(req.Validators)) + sort.Sort(abci.ValidatorUpdates(res.Validators)) + + for i := range res.Validators { + if !proto.Equal(&res.Validators[i], &req.Validators[i]) { + return nil, fmt.Errorf("genesisValidators[%d] != req.Validators[%d] ", i, i) + } + } + } + + // NOTE: We don't commit, but FinalizeBlock for block InitialHeight starts from + // this FinalizeBlockState. + return &abci.ResponseInitChain{ + ConsensusParams: res.ConsensusParams, + Validators: res.Validators, + AppHash: app.LastCommitID().Hash, + }, nil +} + +func (app *BaseApp) Info(_ *abci.RequestInfo) (*abci.ResponseInfo, error) { + lastCommitID := app.cms.LastCommitID() + + return &abci.ResponseInfo{ + Data: app.name, + Version: app.version, + AppVersion: app.appVersion, + LastBlockHeight: lastCommitID.Version, + LastBlockAppHash: lastCommitID.Hash, + }, nil +} + +// Query implements the ABCI interface. It delegates to CommitMultiStore if it +// implements Queryable. 
+func (app *BaseApp) Query(_ context.Context, req *abci.RequestQuery) (resp *abci.ResponseQuery, err error) { + // add panic recovery for all queries + // + // Ref: https://github.com/cosmos/cosmos-sdk/pull/8039 + defer func() { + if r := recover(); r != nil { + resp = sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrPanic, "%v", r), app.trace) + } + }() + + // when a client did not provide a query height, manually inject the latest + if req.Height == 0 { + req.Height = app.LastBlockHeight() + } + + telemetry.IncrCounter(1, "query", "count") + telemetry.IncrCounter(1, "query", req.Path) + defer telemetry.MeasureSince(telemetry.Now(), req.Path) + + if req.Path == QueryPathBroadcastTx { + return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "can't route a broadcast tx message"), app.trace), nil + } + + // handle gRPC routes first rather than calling splitPath because '/' characters + // are used as part of gRPC paths + if grpcHandler := app.grpcQueryRouter.Route(req.Path); grpcHandler != nil { + return app.handleQueryGRPC(grpcHandler, req), nil + } + + path := SplitABCIQueryPath(req.Path) + if len(path) == 0 { + return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "no query path provided"), app.trace), nil + } + + switch path[0] { + case QueryPathApp: + // "/app" prefix for special application queries + resp = handleQueryApp(app, path, req) + + case QueryPathStore: + resp = handleQueryStore(app, path, *req) + + case QueryPathP2P: + resp = handleQueryP2P(app, path) + + default: + resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "unknown query path"), app.trace) + } + + return resp, nil +} + +// ListSnapshots implements the ABCI interface. It delegates to app.snapshotManager if set. 
+func (app *BaseApp) ListSnapshots(req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { + resp := &abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{}} + if app.snapshotManager == nil { + return resp, nil + } + + snapshots, err := app.snapshotManager.List() + if err != nil { + app.logger.Error("failed to list snapshots", "err", err) + return nil, err + } + + for _, snapshot := range snapshots { + abciSnapshot, err := snapshot.ToABCI() + if err != nil { + app.logger.Error("failed to convert ABCI snapshots", "err", err) + return nil, err + } + + resp.Snapshots = append(resp.Snapshots, &abciSnapshot) + } + + return resp, nil +} + +// LoadSnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set. +func (app *BaseApp) LoadSnapshotChunk(req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { + if app.snapshotManager == nil { + return &abci.ResponseLoadSnapshotChunk{}, nil + } + + chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk) + if err != nil { + app.logger.Error( + "failed to load snapshot chunk", + "height", req.Height, + "format", req.Format, + "chunk", req.Chunk, + "err", err, + ) + return nil, err + } + + return &abci.ResponseLoadSnapshotChunk{Chunk: chunk}, nil +} + +// OfferSnapshot implements the ABCI interface. It delegates to app.snapshotManager if set. 
+func (app *BaseApp) OfferSnapshot(req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { + if app.snapshotManager == nil { + app.logger.Error("snapshot manager not configured") + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil + } + + if req.Snapshot == nil { + app.logger.Error("received nil snapshot") + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil + } + + snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot) + if err != nil { + app.logger.Error("failed to decode snapshot metadata", "err", err) + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil + } + + err = app.snapshotManager.Restore(snapshot) + switch { + case err == nil: + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil + + case errors.Is(err, snapshottypes.ErrUnknownFormat): + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil + + case errors.Is(err, snapshottypes.ErrInvalidMetadata): + app.logger.Error( + "rejecting invalid snapshot", + "height", req.Snapshot.Height, + "format", req.Snapshot.Format, + "err", err, + ) + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil + + default: + // CometBFT errors are defined here: https://github.com/cometbft/cometbft/blob/main/statesync/syncer.go + // It may happen that in case of a CometBFT error, such as a timeout (which occurs after two minutes), + // the process is aborted. This is done intentionally because deleting the database programmatically + // can lead to more complicated situations. + app.logger.Error( + "failed to restore snapshot", + "height", req.Snapshot.Height, + "format", req.Snapshot.Format, + "err", err, + ) + + // We currently don't support resetting the IAVL stores and retrying a + // different snapshot, so we ask CometBFT to abort all snapshot restoration. 
+ return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil + } +} + +// ApplySnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set. +func (app *BaseApp) ApplySnapshotChunk(req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { + if app.snapshotManager == nil { + app.logger.Error("snapshot manager not configured") + return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}, nil + } + + _, err := app.snapshotManager.RestoreChunk(req.Chunk) + switch { + case err == nil: + return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil + + case errors.Is(err, snapshottypes.ErrChunkHashMismatch): + app.logger.Error( + "chunk checksum mismatch; rejecting sender and requesting refetch", + "chunk", req.Index, + "sender", req.Sender, + "err", err, + ) + return &abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_RETRY, + RefetchChunks: []uint32{req.Index}, + RejectSenders: []string{req.Sender}, + }, nil + + default: + app.logger.Error("failed to restore snapshot", "err", err) + return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}, nil + } +} + +// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In +// CheckTx mode, messages are not executed. This means messages are only validated +// and only the AnteHandler is executed. State is persisted to the BaseApp's +// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx +// will contain relevant error information. Regardless of tx execution outcome, +// the ResponseCheckTx will contain relevant gas execution context. 
+func (app *BaseApp) CheckTx(req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { + var mode execMode + + switch req.Type { + case abci.CheckTxType_New: + mode = execModeCheck + + case abci.CheckTxType_Recheck: + mode = execModeReCheck + + default: + return nil, fmt.Errorf("unknown RequestCheckTx type: %s", req.Type) + } + + if app.checkTxHandler == nil { + gInfo, result, anteEvents, err := app.runTx(mode, req.Tx, nil) + if err != nil { + return sdkerrors.ResponseCheckTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, anteEvents, app.trace), nil + } + + return &abci.ResponseCheckTx{ + GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? + GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints? + Log: result.Log, + Data: result.Data, + Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), + }, nil + } + + // Create wrapper to avoid users overriding the execution mode + runTx := func(txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) { + return app.runTx(mode, txBytes, tx) + } + + return app.checkTxHandler(runTx, req) +} + +// PrepareProposal implements the PrepareProposal ABCI method and returns a +// ResponsePrepareProposal object to the client. The PrepareProposal method is +// responsible for allowing the block proposer to perform application-dependent +// work in a block before proposing it. +// +// Transactions can be modified, removed, or added by the application. Since the +// application maintains its own local mempool, it will ignore the transactions +// provided to it in RequestPrepareProposal. Instead, it will determine which +// transactions to return based on the mempool's semantics and the MaxTxBytes +// provided by the client's request. 
+// +// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md +// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md +func (app *BaseApp) PrepareProposal(req *abci.RequestPrepareProposal) (resp *abci.ResponsePrepareProposal, err error) { + if app.prepareProposal == nil { + return nil, errors.New("PrepareProposal handler not set") + } + + // Always reset state given that PrepareProposal can timeout and be called + // again in a subsequent round. + header := cmtproto.Header{ + ChainID: app.chainID, + Height: req.Height, + Time: req.Time, + ProposerAddress: req.ProposerAddress, + NextValidatorsHash: req.NextValidatorsHash, + AppHash: app.LastCommitID().Hash, + } + app.setState(execModePrepareProposal, header) + + // CometBFT must never call PrepareProposal with a height of 0. + // + // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38 + if req.Height < 1 { + return nil, errors.New("PrepareProposal called with invalid height") + } + + app.prepareProposalState.SetContext(app.getContextForProposal(app.prepareProposalState.Context(), req.Height). + WithVoteInfos(toVoteInfo(req.LocalLastCommit.Votes)). // this is a set of votes that are not finalized yet, wait for commit + WithBlockHeight(req.Height). + WithBlockTime(req.Time). + WithProposer(req.ProposerAddress). + WithExecMode(sdk.ExecModePrepareProposal). + WithCometInfo(prepareProposalInfo{req}). + WithHeaderInfo(coreheader.Info{ + ChainID: app.chainID, + Height: req.Height, + Time: req.Time, + })) + + app.prepareProposalState.SetContext(app.prepareProposalState.Context(). + WithConsensusParams(app.GetConsensusParams(app.prepareProposalState.Context())). 
+ WithBlockGasMeter(app.getBlockGasMeter(app.prepareProposalState.Context()))) + + defer func() { + if err := recover(); err != nil { + app.logger.Error( + "panic recovered in PrepareProposal", + "height", req.Height, + "time", req.Time, + "panic", err, + ) + + resp = &abci.ResponsePrepareProposal{Txs: req.Txs} + } + }() + + resp, err = app.prepareProposal(app.prepareProposalState.Context(), req) + if err != nil { + app.logger.Error("failed to prepare proposal", "height", req.Height, "time", req.Time, "err", err) + return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil + } + + return resp, nil +} + +// ProcessProposal implements the ProcessProposal ABCI method and returns a +// ResponseProcessProposal object to the client. The ProcessProposal method is +// responsible for allowing execution of application-dependent work in a proposed +// block. Note, the application defines the exact implementation details of +// ProcessProposal. In general, the application must at the very least ensure +// that all transactions are valid. If all transactions are valid, then we inform +// CometBFT that the Status is ACCEPT. However, the application is also able +// to implement optimizations such as executing the entire proposed block +// immediately. +// +// If a panic is detected during execution of an application's ProcessProposal +// handler, it will be recovered and we will reject the proposal. +// +// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md +// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md +func (app *BaseApp) ProcessProposal(req *abci.RequestProcessProposal) (resp *abci.ResponseProcessProposal, err error) { + if app.processProposal == nil { + return nil, errors.New("ProcessProposal handler not set") + } + + // CometBFT must never call ProcessProposal with a height of 0. 
+ // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38 + if req.Height < 1 { + return nil, errors.New("ProcessProposal called with invalid height") + } + + // Always reset state given that ProcessProposal can timeout and be called + // again in a subsequent round. + header := cmtproto.Header{ + ChainID: app.chainID, + Height: req.Height, + Time: req.Time, + ProposerAddress: req.ProposerAddress, + NextValidatorsHash: req.NextValidatorsHash, + AppHash: app.LastCommitID().Hash, + } + app.setState(execModeProcessProposal, header) + + // Since the application can get access to FinalizeBlock state and write to it, + // we must be sure to reset it in case ProcessProposal timeouts and is called + // again in a subsequent round. However, we only want to do this after we've + // processed the first block, as we want to avoid overwriting the finalizeState + // after state changes during InitChain. + if req.Height > app.initialHeight { + // abort any running OE + app.optimisticExec.Abort() + app.setState(execModeFinalize, header) + } + + app.processProposalState.SetContext(app.getContextForProposal(app.processProposalState.Context(), req.Height). + WithVoteInfos(req.ProposedLastCommit.Votes). // this is a set of votes that are not finalized yet, wait for commit + WithBlockHeight(req.Height). + WithBlockTime(req.Time). + WithHeaderHash(req.Hash). + WithProposer(req.ProposerAddress). + WithCometInfo(cometInfo{ProposerAddress: req.ProposerAddress, ValidatorsHash: req.NextValidatorsHash, Misbehavior: req.Misbehavior, LastCommit: req.ProposedLastCommit}). + WithExecMode(sdk.ExecModeProcessProposal). + WithHeaderInfo(coreheader.Info{ + ChainID: app.chainID, + Height: req.Height, + Time: req.Time, + })) + + app.processProposalState.SetContext(app.processProposalState.Context(). + WithConsensusParams(app.GetConsensusParams(app.processProposalState.Context())). 
+ WithBlockGasMeter(app.getBlockGasMeter(app.processProposalState.Context()))) + + defer func() { + if err := recover(); err != nil { + app.logger.Error( + "panic recovered in ProcessProposal", + "height", req.Height, + "time", req.Time, + "hash", fmt.Sprintf("%X", req.Hash), + "panic", err, + ) + resp = &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT} + } + }() + + resp, err = app.processProposal(app.processProposalState.Context(), req) + if err != nil { + app.logger.Error("failed to process proposal", "height", req.Height, "time", req.Time, "hash", fmt.Sprintf("%X", req.Hash), "err", err) + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + } + + // Only execute optimistic execution if the proposal is accepted, OE is + // enabled and the block height is greater than the initial height. During + // the first block we'll be carrying state from InitChain, so it would be + // impossible for us to easily revert. + // After the first block has been processed, the next blocks will get executed + // optimistically, so that when the ABCI client calls `FinalizeBlock` the app + // can have a response ready. + if resp.Status == abci.ResponseProcessProposal_ACCEPT && + app.optimisticExec.Enabled() && + req.Height > app.initialHeight { + app.optimisticExec.Execute(req) + } + + return resp, nil +} + +// ExtendVote implements the ExtendVote ABCI method and returns a ResponseExtendVote. +// It calls the application's ExtendVote handler which is responsible for performing +// application-specific business logic when sending a pre-commit for the NEXT +// block height. The extensions response may be non-deterministic but must always +// be returned, even if empty. +// +// Agreed upon vote extensions are made available to the proposer of the next +// height and are committed in the subsequent height, i.e. H+2. An error is +// returned if vote extensions are not enabled or if extendVote fails or panics. 
+func (app *BaseApp) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (resp *abci.ResponseExtendVote, err error) { + // Always reset state given that ExtendVote and VerifyVoteExtension can timeout + // and be called again in a subsequent round. + var ctx sdk.Context + + // If we're extending the vote for the initial height, we need to use the + // finalizeBlockState context, otherwise we don't get the uncommitted data + // from InitChain. + if req.Height == app.initialHeight { + ctx, _ = app.finalizeBlockState.Context().CacheContext() + } else { + emptyHeader := cmtproto.Header{ChainID: app.chainID, Height: req.Height} + ms := app.cms.CacheMultiStore() + ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager) + } + + if app.extendVote == nil { + return nil, errors.New("application ExtendVote handler not set") + } + + // If vote extensions are not enabled, as a safety precaution, we return an + // error. + cp := app.GetConsensusParams(ctx) + + // Note: In this case, we do want to extend vote if the height is equal or + // greater than VoteExtensionsEnableHeight. This defers from the check done + // in ValidateVoteExtensions and PrepareProposal in which we'll check for + // vote extensions on VoteExtensionsEnableHeight+1. + extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 + if !extsEnabled { + return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to ExtendVote at height %d", req.Height) + } + + ctx = ctx. + WithConsensusParams(cp). + WithBlockGasMeter(storetypes.NewInfiniteGasMeter()). + WithBlockHeight(req.Height). + WithHeaderHash(req.Hash). + WithExecMode(sdk.ExecModeVoteExtension). 
+ WithHeaderInfo(coreheader.Info{ + ChainID: app.chainID, + Height: req.Height, + Hash: req.Hash, + }) + + // add a deferred recover handler in case extendVote panics + defer func() { + if r := recover(); r != nil { + app.logger.Error( + "panic recovered in ExtendVote", + "height", req.Height, + "hash", fmt.Sprintf("%X", req.Hash), + "panic", err, + ) + err = fmt.Errorf("recovered application panic in ExtendVote: %v", r) + } + }() + + resp, err = app.extendVote(ctx, req) + if err != nil { + app.logger.Error("failed to extend vote", "height", req.Height, "hash", fmt.Sprintf("%X", req.Hash), "err", err) + return &abci.ResponseExtendVote{VoteExtension: []byte{}}, nil + } + + return resp, err +} + +// VerifyVoteExtension implements the VerifyVoteExtension ABCI method and returns +// a ResponseVerifyVoteExtension. It calls the applications' VerifyVoteExtension +// handler which is responsible for performing application-specific business +// logic in verifying a vote extension from another validator during the pre-commit +// phase. The response MUST be deterministic. An error is returned if vote +// extensions are not enabled or if verifyVoteExt fails or panics. +func (app *BaseApp) VerifyVoteExtension(req *abci.RequestVerifyVoteExtension) (resp *abci.ResponseVerifyVoteExtension, err error) { + if app.verifyVoteExt == nil { + return nil, errors.New("application VerifyVoteExtension handler not set") + } + + var ctx sdk.Context + + // If we're verifying the vote for the initial height, we need to use the + // finalizeBlockState context, otherwise we don't get the uncommitted data + // from InitChain. 
+ if req.Height == app.initialHeight { + ctx, _ = app.finalizeBlockState.Context().CacheContext() + } else { + emptyHeader := cmtproto.Header{ChainID: app.chainID, Height: req.Height} + ms := app.cms.CacheMultiStore() + ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager) + } + + // If vote extensions are not enabled, as a safety precaution, we return an + // error. + cp := app.GetConsensusParams(ctx) + + // Note: we verify votes extensions on VoteExtensionsEnableHeight+1. Check + // comment in ExtendVote and ValidateVoteExtensions for more details. + extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 + if !extsEnabled { + return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to VerifyVoteExtension at height %d", req.Height) + } + + // add a deferred recover handler in case verifyVoteExt panics + defer func() { + if r := recover(); r != nil { + app.logger.Error( + "panic recovered in VerifyVoteExtension", + "height", req.Height, + "hash", fmt.Sprintf("%X", req.Hash), + "validator", fmt.Sprintf("%X", req.ValidatorAddress), + "panic", r, + ) + err = fmt.Errorf("recovered application panic in VerifyVoteExtension: %v", r) + } + }() + + ctx = ctx. + WithConsensusParams(cp). + WithBlockGasMeter(storetypes.NewInfiniteGasMeter()). + WithBlockHeight(req.Height). + WithHeaderHash(req.Hash). + WithExecMode(sdk.ExecModeVerifyVoteExtension). + WithHeaderInfo(coreheader.Info{ + ChainID: app.chainID, + Height: req.Height, + Hash: req.Hash, + }) + + resp, err = app.verifyVoteExt(ctx, req) + if err != nil { + app.logger.Error("failed to verify vote extension", "height", req.Height, "err", err) + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil + } + + return resp, err +} + +// internalFinalizeBlock executes the block, called by the Optimistic +// Execution flow or by the FinalizeBlock ABCI method. 
// The context received is
// only used to handle early cancellation; for anything related to state,
// app.finalizeBlockState.Context() must be used.
func (app *BaseApp) internalFinalizeBlock(ctx context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
	var events []abci.Event

	// Refuse to execute past a configured halt height/time.
	if err := app.checkHalt(req.Height, req.Time); err != nil {
		return nil, err
	}

	if err := app.validateFinalizeBlockHeight(req); err != nil {
		return nil, err
	}

	if app.cms.TracingEnabled() {
		app.cms.SetTracingContext(storetypes.TraceContext(
			map[string]any{"blockHeight": req.Height},
		))
	}

	header := cmtproto.Header{
		ChainID:            app.chainID,
		Height:             req.Height,
		Time:               req.Time,
		ProposerAddress:    req.ProposerAddress,
		NextValidatorsHash: req.NextValidatorsHash,
		AppHash:            app.LastCommitID().Hash,
	}

	// finalizeBlockState should be set on InitChain or ProcessProposal. If it is
	// nil, it means we are replaying this block and we need to set the state here
	// given that during block replay ProcessProposal is not executed by CometBFT.
	if app.finalizeBlockState == nil {
		app.setState(execModeFinalize, header)
	}

	// Context is now updated with Header information.
	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().
		WithBlockHeader(header).
		WithHeaderHash(req.Hash).
		WithHeaderInfo(coreheader.Info{
			ChainID: app.chainID,
			Height:  req.Height,
			Time:    req.Time,
			Hash:    req.Hash,
			AppHash: app.LastCommitID().Hash,
		}).
		WithConsensusParams(app.GetConsensusParams(app.finalizeBlockState.Context())).
		WithVoteInfos(req.DecidedLastCommit.Votes).
		WithExecMode(sdk.ExecModeFinalize).
		WithCometInfo(cometInfo{
			Misbehavior:     req.Misbehavior,
			ValidatorsHash:  req.NextValidatorsHash,
			ProposerAddress: req.ProposerAddress,
			LastCommit:      req.DecidedLastCommit,
		}))

	// GasMeter must be set after we get a context with updated consensus params.
	gasMeter := app.getBlockGasMeter(app.finalizeBlockState.Context())
	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))

	// Keep the CheckTx state's gas meter and header hash in sync with the block
	// being finalized (checkState may be nil before the first Commit).
	if app.checkState != nil {
		app.checkState.SetContext(app.checkState.Context().
			WithBlockGasMeter(gasMeter).
			WithHeaderHash(req.Hash))
	}

	preblockEvents, err := app.preBlock(req)
	if err != nil {
		return nil, err
	}

	events = append(events, preblockEvents...)

	beginBlock, err := app.beginBlock(req)
	if err != nil {
		return nil, err
	}

	// First check for an abort signal after beginBlock, as it's the first place
	// we spend any significant amount of time.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
		// continue
	}

	events = append(events, beginBlock.Events...)

	// Reset the block gas meter after begin-block processing so tx execution
	// starts from a fresh meter (the AnteHandlers aren't required to reset it).
	gasMeter = app.getBlockGasMeter(app.finalizeBlockState.Context())
	app.finalizeBlockState.SetContext(app.finalizeBlockState.Context().WithBlockGasMeter(gasMeter))

	// Iterate over all raw transactions in the proposal and attempt to execute
	// them, gathering the execution results.
	//
	// NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g.
	// vote extensions, so skip those.
	txResults := make([]*abci.ExecTxResult, 0, len(req.Txs))
	for _, rawTx := range req.Txs {
		var response *abci.ExecTxResult

		if _, err := app.txDecoder(rawTx); err == nil {
			response = app.deliverTx(rawTx)
		} else {
			// In the case where a transaction included in a block proposal is malformed,
			// we still want to return a default response to comet. This is because comet
			// expects a response for each transaction included in a block proposal.
			response = sdkerrors.ResponseExecTxResultWithEvents(
				sdkerrors.ErrTxDecode,
				0,
				0,
				nil,
				false,
			)
		}

		// check after every tx if we should abort
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		default:
			// continue
		}

		txResults = append(txResults, response)
	}

	if app.finalizeBlockState.ms.TracingEnabled() {
		app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore)
	}

	endBlock, err := app.endBlock(app.finalizeBlockState.Context())
	if err != nil {
		return nil, err
	}

	// check after endBlock if we should abort, to avoid propagating the result
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
		// continue
	}

	events = append(events, endBlock.Events...)
	cp := app.GetConsensusParams(app.finalizeBlockState.Context())

	return &abci.ResponseFinalizeBlock{
		Events:                events,
		TxResults:             txResults,
		ValidatorUpdates:      endBlock.ValidatorUpdates,
		ConsensusParamUpdates: &cp,
	}, nil
}

// FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock.
// Specifically, it will execute an application's BeginBlock (if defined), followed
// by the transactions in the proposal, finally followed by the application's
// EndBlock (if defined).
//
// For each raw transaction, i.e. a byte slice, BaseApp will only execute it if
// it adheres to the sdk.Tx interface. Otherwise, the raw transaction will be
// skipped. This is to support compatibility with proposers injecting vote
// extensions into the proposal, which should not themselves be executed in cases
// where they adhere to the sdk.Tx interface.
func (app *BaseApp) FinalizeBlock(req *abci.RequestFinalizeBlock) (res *abci.ResponseFinalizeBlock, err error) {
	defer func() {
		// Only notify streaming listeners when a response was actually produced.
		if res == nil {
			return
		}
		// call the streaming service hooks with the FinalizeBlock messages
		for _, streamingListener := range app.streamingManager.ABCIListeners {
			if err := streamingListener.ListenFinalizeBlock(app.finalizeBlockState.Context(), *req, *res); err != nil {
				// Listener failures are logged, never fatal to consensus.
				app.logger.Error("ListenFinalizeBlock listening hook failed", "height", req.Height, "err", err)
			}
		}
	}()

	// Optimistic Execution (OE) path: a background execution may already be
	// running for this block, started from ProcessProposal.
	if app.optimisticExec.Initialized() {
		// check if the hash we got is the same as the one we are executing
		aborted := app.optimisticExec.AbortIfNeeded(req.Hash)
		// Wait for the OE to finish, regardless of whether it was aborted or not
		res, err = app.optimisticExec.WaitResult()

		// only return if we are not aborting
		if !aborted {
			if res != nil {
				res.AppHash = app.workingHash()
			}

			return res, err
		}

		// if it was aborted, we need to reset the state
		app.finalizeBlockState = nil
		app.optimisticExec.Reset()
	}

	// if no OE is running, just run the block (this is either a block replay or a OE that got aborted)
	res, err = app.internalFinalizeBlock(context.Background(), req)
	if res != nil {
		res.AppHash = app.workingHash()
	}

	return res, err
}

// checkHalt checks if height or time exceeds halt-height or halt-time respectively.
// A non-nil error signals the node should refuse to finalize this block.
func (app *BaseApp) checkHalt(height int64, time time.Time) error {
	// NOTE: the `time` parameter shadows the time package here; time.Unix() is
	// the method on the value, not the package function.
	var halt bool
	switch {
	case app.haltHeight > 0 && uint64(height) >= app.haltHeight:
		halt = true

	case app.haltTime > 0 && time.Unix() >= int64(app.haltTime):
		halt = true
	}

	if halt {
		return fmt.Errorf("halt per configuration height %d time %d", app.haltHeight, app.haltTime)
	}

	return nil
}

// Commit implements the ABCI interface. It will commit all state that exists in
// the deliver state's multi-store and includes the resulting commit ID in the
// returned abci.ResponseCommit.
// Commit will set the check state based on the
// latest header and reset the deliver state. Also, if a non-zero halt height is
// defined in config, Commit will execute a deferred function call to check
// against that height and gracefully halt if it matches the latest committed
// height.
func (app *BaseApp) Commit() (*abci.ResponseCommit, error) {
	header := app.finalizeBlockState.Context().BlockHeader()
	retainHeight := app.GetBlockRetentionHeight(header.Height)

	// Application-defined pre-commit hook runs against the finalize state
	// before it is flushed.
	if app.precommiter != nil {
		app.precommiter(app.finalizeBlockState.Context())
	}

	rms, ok := app.cms.(*rootmulti.Store)
	if ok {
		rms.SetCommitHeader(header)
	}

	app.cms.Commit()

	resp := &abci.ResponseCommit{
		RetainHeight: retainHeight,
	}

	abciListeners := app.streamingManager.ABCIListeners
	if len(abciListeners) > 0 {
		ctx := app.finalizeBlockState.Context()
		blockHeight := ctx.BlockHeight()
		changeSet := app.cms.PopStateCache()

		for _, abciListener := range abciListeners {
			if err := abciListener.ListenCommit(ctx, *resp, changeSet); err != nil {
				// Listener failures are logged, never fatal.
				app.logger.Error("Commit listening hook failed", "height", blockHeight, "err", err)
			}
		}
	}

	// Reset the CheckTx state to the latest committed.
	//
	// NOTE: This is safe because CometBFT holds a lock on the mempool for
	// Commit. Use the header from this latest block.
	app.setState(execModeCheck, header)

	// The finalize state was flushed via workingHash()/Commit; drop it so the
	// next block starts fresh (or triggers the replay path).
	app.finalizeBlockState = nil

	if app.prepareCheckStater != nil {
		app.prepareCheckStater(app.checkState.Context())
	}

	// The SnapshotIfApplicable method will create the snapshot by starting the goroutine
	app.snapshotManager.SnapshotIfApplicable(header.Height)

	return resp, nil
}

// workingHash gets the apphash that will be finalized in commit.
// These writes will be persisted to the root multi-store (app.cms) and flushed to
This means when the ABCI client requests Commit(), the application +// state transitions will be flushed to disk and as a result, but we already have +// an application Merkle root. +func (app *BaseApp) workingHash() []byte { + // Write the FinalizeBlock state into branched storage and commit the MultiStore. + // The write to the FinalizeBlock state writes all state transitions to the root + // MultiStore (app.cms) so when Commit() is called it persists those values. + app.finalizeBlockState.ms.Write() + + // Get the hash of all writes in order to return the apphash to the comet in finalizeBlock. + commitHash := app.cms.WorkingHash() + app.logger.Debug("hash of all writes", "workingHash", fmt.Sprintf("%X", commitHash)) + + return commitHash +} + +func handleQueryApp(app *BaseApp, path []string, req *abci.RequestQuery) *abci.ResponseQuery { + if len(path) >= 2 { + switch path[1] { + case "simulate": + txBytes := req.Data + + gInfo, res, err := app.Simulate(txBytes) + if err != nil { + return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to simulate tx"), app.trace) + } + + simRes := &sdk.SimulationResponse{ + GasInfo: gInfo, + Result: res, + } + + bz, err := codec.ProtoMarshalJSON(simRes, app.interfaceRegistry) + if err != nil { + return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to JSON encode simulation response"), app.trace) + } + + return &abci.ResponseQuery{ + Codespace: sdkerrors.RootCodespace, + Height: req.Height, + Value: bz, + } + + case "version": + return &abci.ResponseQuery{ + Codespace: sdkerrors.RootCodespace, + Height: req.Height, + Value: []byte(app.version), + } + + default: + return sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace) + } + } + + return sdkerrors.QueryResult( + errorsmod.Wrap( + sdkerrors.ErrUnknownRequest, + "expected second parameter to be either 'simulate' or 'version', neither was present", + ), app.trace) +} + +func handleQueryStore(app *BaseApp, path []string, 
req abci.RequestQuery) *abci.ResponseQuery { + // "/store" prefix for store queries + queryable, ok := app.cms.(storetypes.Queryable) + if !ok { + return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multi-store does not support queries"), app.trace) + } + + req.Path = "/" + strings.Join(path[1:], "/") + + if req.Height <= 1 && req.Prove { + return sdkerrors.QueryResult( + errorsmod.Wrap( + sdkerrors.ErrInvalidRequest, + "cannot query with proof when height <= 1; please provide a valid height", + ), app.trace) + } + + sdkReq := storetypes.RequestQuery(req) + resp, err := queryable.Query(&sdkReq) + if err != nil { + return sdkerrors.QueryResult(err, app.trace) + } + resp.Height = req.Height + + abciResp := abci.ResponseQuery(*resp) + + return &abciResp +} + +func handleQueryP2P(app *BaseApp, path []string) *abci.ResponseQuery { + // "/p2p" prefix for p2p queries + if len(path) < 4 { + return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "path should be p2p filter "), app.trace) + } + + var resp *abci.ResponseQuery + + cmd, typ, arg := path[1], path[2], path[3] + switch cmd { + case "filter": + switch typ { + case "addr": + resp = app.FilterPeerByAddrPort(arg) + + case "id": + resp = app.FilterPeerByID(arg) + } + + default: + resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace) + } + + return resp +} + +// SplitABCIQueryPath splits a string path using the delimiter '/'. +// +// e.g. "this/is/funny" becomes []string{"this", "is", "funny"} +func SplitABCIQueryPath(requestPath string) (path []string) { + path = strings.Split(requestPath, "/") + + // first element is empty string + if len(path) > 0 && path[0] == "" { + path = path[1:] + } + + return path +} + +// FilterPeerByAddrPort filters peers by address/port. 
// Delegates to the configured addrPeerFilter; with no filter set, every peer
// is accepted (empty response).
func (app *BaseApp) FilterPeerByAddrPort(info string) *abci.ResponseQuery {
	if app.addrPeerFilter != nil {
		return app.addrPeerFilter(info)
	}

	return &abci.ResponseQuery{}
}

// FilterPeerByID filters peers by node ID.
// Delegates to the configured idPeerFilter; with no filter set, every peer is
// accepted (empty response).
func (app *BaseApp) FilterPeerByID(info string) *abci.ResponseQuery {
	if app.idPeerFilter != nil {
		return app.idPeerFilter(info)
	}

	return &abci.ResponseQuery{}
}

// getContextForProposal returns the correct Context for PrepareProposal and
// ProcessProposal. We use finalizeBlockState on the first block to be able to
// access any state changes made in InitChain.
func (app *BaseApp) getContextForProposal(ctx sdk.Context, height int64) sdk.Context {
	if height == app.initialHeight {
		ctx, _ = app.finalizeBlockState.Context().CacheContext()

		// clear all context data set during InitChain to avoid inconsistent behavior
		ctx = ctx.WithBlockHeader(cmtproto.Header{}).WithHeaderInfo(coreheader.Info{})
		return ctx
	}

	return ctx
}

// handleQueryGRPC dispatches a gRPC-routed ABCI query to the given handler,
// translating gRPC status codes into SDK errors on failure.
func (app *BaseApp) handleQueryGRPC(handler GRPCQueryHandler, req *abci.RequestQuery) *abci.ResponseQuery {
	ctx, err := app.CreateQueryContext(req.Height, req.Prove)
	if err != nil {
		return sdkerrors.QueryResult(err, app.trace)
	}

	resp, err := handler(ctx, req)
	if err != nil {
		resp = sdkerrors.QueryResult(gRPCErrorToSDKError(err), app.trace)
		resp.Height = req.Height
		return resp
	}

	return resp
}

// gRPCErrorToSDKError maps a gRPC status error onto the closest SDK error so
// ABCI clients get a meaningful codespace/code.
func gRPCErrorToSDKError(err error) error {
	status, ok := grpcstatus.FromError(err)
	if !ok {
		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())
	}

	switch status.Code() {
	case codes.NotFound:
		return errorsmod.Wrap(sdkerrors.ErrKeyNotFound, err.Error())

	case codes.InvalidArgument:
		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())

	case codes.FailedPrecondition:
		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error())

	case codes.Unauthenticated:
		return errorsmod.Wrap(sdkerrors.ErrUnauthorized, err.Error())

	default:
		return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, err.Error())
	}
}

// checkNegativeHeight rejects negative query heights (0 means "latest").
func checkNegativeHeight(height int64) error {
	if height < 0 {
		return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "cannot query with height < 0; please provide a valid height")
	}

	return nil
}

// CreateQueryContext creates a new sdk.Context for a query, taking as args
// the block height and whether the query needs a proof or not.
func (app *BaseApp) CreateQueryContext(height int64, prove bool) (sdk.Context, error) {
	return app.CreateQueryContextWithCheckHeader(height, prove, true)
}

// CreateQueryContextWithCheckHeader creates a new sdk.Context for a query, taking as args
// the block height, whether the query needs a proof or not, and whether to check the header or not.
func (app *BaseApp) CreateQueryContextWithCheckHeader(height int64, prove, checkHeader bool) (sdk.Context, error) {
	if err := checkNegativeHeight(height); err != nil {
		return sdk.Context{}, err
	}

	// use custom query multi-store if provided
	qms := app.qms
	if qms == nil {
		qms = app.cms.(storetypes.MultiStore)
	}

	lastBlockHeight := qms.LatestVersion()
	if lastBlockHeight == 0 {
		return sdk.Context{}, errorsmod.Wrapf(sdkerrors.ErrInvalidHeight, "%s is not ready; please wait for first block", app.Name())
	}

	if height > lastBlockHeight {
		return sdk.Context{},
			errorsmod.Wrap(
				sdkerrors.ErrInvalidHeight,
				"cannot query with height in the future; please provide a valid height",
			)
	}

	if height == 1 && prove {
		return sdk.Context{},
			errorsmod.Wrap(
				sdkerrors.ErrInvalidRequest,
				"cannot query with proof when height <= 1; please provide a valid height",
			)
	}

	// Pick a header from the first usable cached state: checkState first, then
	// finalizeBlockState. When querying "latest" with header checking enabled,
	// the candidate's height must match the store's latest version.
	var header *cmtproto.Header
	isLatest := height == 0
	for _, state := range []*state{
		app.checkState,
		app.finalizeBlockState,
	} {
		if state != nil {
			// branch the commit multi-store for safety
			h := state.Context().BlockHeader()
			if isLatest {
				// re-read in case a commit landed between candidates
				lastBlockHeight = qms.LatestVersion()
			}
			if !checkHeader || !isLatest || isLatest && h.Height == lastBlockHeight {
				header = &h
				break
			}
		}
	}

	if header == nil {
		return sdk.Context{},
			errorsmod.Wrapf(
				sdkerrors.ErrInvalidHeight,
				"context did not contain latest block height in either check state or finalize block state (%d)", lastBlockHeight,
			)
	}

	// when a client did not provide a query height, manually inject the latest
	if isLatest {
		height = lastBlockHeight
	}

	cacheMS, err := qms.CacheMultiStoreWithVersion(height)
	if err != nil {
		return sdk.Context{},
			errorsmod.Wrapf(
				sdkerrors.ErrNotFound,
				"failed to load state at height %d; %s (latest height: %d)", height, err, lastBlockHeight,
			)
	}

	// branch the commit multi-store for safety
	ctx := sdk.NewContext(cacheMS, *header, true, app.logger).
		WithMinGasPrices(app.minGasPrices).
		WithGasMeter(storetypes.NewGasMeter(app.queryGasLimit)).
		WithBlockHeader(*header).
		WithBlockHeight(height)

	if !isLatest {
		// For historical queries, recover the block time from the commit info
		// (the cached header belongs to the latest block, not `height`).
		rms, ok := app.cms.(*rootmulti.Store)
		if ok {
			cInfo, err := rms.GetCommitInfo(height)
			if cInfo != nil && err == nil {
				ctx = ctx.WithBlockHeight(height).WithBlockTime(cInfo.Timestamp)
			}
		}
	}
	return ctx, nil
}

// GetBlockRetentionHeight returns the height for which all blocks below this height
// are pruned from CometBFT. Given a commitment height and a non-zero local
// minRetainBlocks configuration, the retentionHeight is the smallest height that
// satisfies:
//
// - Unbonding (safety threshold) time: The block interval in which validators
// can be economically punished for misbehavior. Blocks in this interval must be
// auditable e.g. by the light client.
//
// - Logical store snapshot interval: The block interval at which the underlying
// logical store database is persisted to disk, e.g. every 10000 heights. Blocks
// since the last IAVL snapshot must be available for replay on application restart.
//
// - State sync snapshots: Blocks since the oldest available snapshot must be
// available for state sync nodes to catch up (oldest because a node may be
// restoring an old snapshot while a new snapshot was taken).
//
// - Local (minRetainBlocks) config: Archive nodes may want to retain more or
// all blocks, e.g. via a local config option min-retain-blocks. There may also
// be a need to vary retention for other nodes, e.g. sentry nodes which do not
// need historical blocks.
func (app *BaseApp) GetBlockRetentionHeight(commitHeight int64) int64 {
	// If minRetainBlocks is zero, pruning is disabled and we return 0
	// If commitHeight is less than or equal to minRetainBlocks, return 0 since there are not enough
	// blocks to trigger pruning yet. This ensures we keep all blocks until we have at least minRetainBlocks.
	retentionBlockWindow := commitHeight - int64(app.minRetainBlocks)
	if app.minRetainBlocks == 0 || retentionBlockWindow <= 0 {
		return 0
	}

	// minNonZero treats 0 as "no constraint" and otherwise returns the smaller value.
	minNonZero := func(x, y int64) int64 {
		switch {
		case x == 0:
			return y

		case y == 0:
			return x

		case x < y:
			return x

		default:
			return y
		}
	}

	// Define retentionHeight as the minimum value that satisfies all non-zero
	// constraints. All blocks below (commitHeight-retentionHeight) are pruned
	// from CometBFT.
	var retentionHeight int64

	// Define the number of blocks needed to protect against misbehaving validators
	// which allows light clients to operate safely. Note, we piggy back of the
	// evidence parameters instead of computing an estimated number of blocks based
	// on the unbonding period and block commitment time as the two should be
	// equivalent.
	// NOTE(review): reads app.finalizeBlockState — callers (Commit) invoke this
	// before that state is reset; confirm no other call sites exist.
	cp := app.GetConsensusParams(app.finalizeBlockState.Context())
	if cp.Evidence != nil && cp.Evidence.MaxAgeNumBlocks > 0 {
		retentionHeight = commitHeight - cp.Evidence.MaxAgeNumBlocks
	}

	if app.snapshotManager != nil {
		snapshotRetentionHeights := app.snapshotManager.GetSnapshotBlockRetentionHeights()
		if snapshotRetentionHeights > 0 {
			retentionHeight = minNonZero(retentionHeight, commitHeight-snapshotRetentionHeights)
		}
	}

	retentionHeight = minNonZero(retentionHeight, retentionBlockWindow)

	if retentionHeight <= 0 {
		// prune nothing in the case of a non-positive height
		return 0
	}

	return retentionHeight
}

// toVoteInfo converts the new ExtendedVoteInfo to VoteInfo, dropping the
// extension-specific fields.
func toVoteInfo(votes []abci.ExtendedVoteInfo) []abci.VoteInfo {
	legacyVotes := make([]abci.VoteInfo, len(votes))
	for i, vote := range votes {
		legacyVotes[i] = abci.VoteInfo{
			Validator: abci.Validator{
				Address: vote.Validator.Address,
				Power:   vote.Validator.Power,
			},
			BlockIdFlag: vote.BlockIdFlag,
		}
	}

	return legacyVotes
}
diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go
new file mode 100644
index 0000000..ce92b3e
--- /dev/null
+++ b/baseapp/abci_test.go
@@ -0,0 +1,2537 @@
package baseapp_test

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"math/rand"
	"strconv"
	"strings"
	"testing"
	"time"

	abci "github.com/cometbft/cometbft/abci/types"
	"github.com/cometbft/cometbft/crypto/secp256k1"
	cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
	dbm "github.com/cosmos/cosmos-db"
	protoio "github.com/cosmos/gogoproto/io"
	"github.com/cosmos/gogoproto/jsonpb"
	"github.com/cosmos/gogoproto/proto"
	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	errorsmod "cosmossdk.io/errors"
	"cosmossdk.io/log"
	pruningtypes "cosmossdk.io/store/pruning/types"
"cosmossdk.io/store/snapshots" + snapshottypes "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" + "github.com/cosmos/cosmos-sdk/baseapp/testutil/mock" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/mempool" + "github.com/cosmos/cosmos-sdk/x/auth/signing" +) + +type mockABCIListener struct { + ListenCommitFn func(context.Context, abci.ResponseCommit, []*storetypes.StoreKVPair) error +} + +func (m mockABCIListener) ListenFinalizeBlock(_ context.Context, _ abci.RequestFinalizeBlock, _ abci.ResponseFinalizeBlock) error { + return nil +} + +func (m *mockABCIListener) ListenCommit(ctx context.Context, commit abci.ResponseCommit, pairs []*storetypes.StoreKVPair) error { + return m.ListenCommitFn(ctx, commit, pairs) +} + +func TestABCI_Info(t *testing.T) { + suite := NewBaseAppSuite(t) + + reqInfo := abci.RequestInfo{} + res, err := suite.baseApp.Info(&reqInfo) + require.NoError(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + require.Equal(t, "", res.Version) + require.Equal(t, t.Name(), res.GetData()) + require.Equal(t, int64(0), res.LastBlockHeight) + require.Equal(t, appHash, res.LastBlockAppHash) + require.Equal(t, suite.baseApp.AppVersion(), res.AppVersion) +} + +func TestABCI_First_block_Height(t *testing.T) { + suite := NewBaseAppSuite(t, baseapp.SetChainID("test-chain-id")) + app := suite.baseApp + + _, err := app.InitChain(&abci.RequestInitChain{ + ChainId: "test-chain-id", + ConsensusParams: &cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 5000000}}, + InitialHeight: 1, + }) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + ctx := app.GetContextForCheckTx(nil) + 
require.Equal(t, int64(1), ctx.BlockHeight()) +} + +func TestABCI_InitChain(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := log.NewTestLogger(t) + app := baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetChainID("test-chain-id")) + + capKey := storetypes.NewKVStoreKey("main") + capKey2 := storetypes.NewKVStoreKey("key2") + app.MountStores(capKey, capKey2) + + // set a value in the store on init chain + key, value := []byte("hello"), []byte("goodbye") + var initChainer sdk.InitChainer = func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { + store := ctx.KVStore(capKey) + store.Set(key, value) + return &abci.ResponseInitChain{}, nil + } + + query := abci.RequestQuery{ + Path: "/store/main/key", + Data: key, + } + + // initChain is nil and chain ID is wrong - errors + _, err := app.InitChain(&abci.RequestInitChain{ChainId: "wrong-chain-id"}) + require.Error(t, err) + + // initChain is nil - nothing happens + _, err = app.InitChain(&abci.RequestInitChain{ChainId: "test-chain-id"}) + require.NoError(t, err) + resQ, err := app.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, 0, len(resQ.Value)) + + // set initChainer and try again - should see the value + app.SetInitChainer(initChainer) + + // stores are mounted and private members are set - sealing baseapp + err = app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(0), app.LastBlockHeight()) + + initChainRes, err := app.InitChain(&abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty + require.NoError(t, err) + + // The AppHash returned by a new chain is the sha256 hash of "". 
+ // $ echo -n '' | sha256sum + // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + require.NoError(t, err) + + require.Equal(t, appHash, initChainRes.AppHash) + + // assert that chainID is set correctly in InitChain + chainID := getFinalizeBlockStateCtx(app).ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") + + chainID = getCheckStateCtx(app).ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{ + Hash: initChainRes.AppHash, + Height: 1, + }) + require.NoError(t, err) + + _, err = app.Commit() + require.NoError(t, err) + + resQ, err = app.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, int64(1), app.LastBlockHeight()) + require.Equal(t, value, resQ.Value) + + // reload app + app = baseapp.NewBaseApp(name, logger, db, nil) + app.SetInitChainer(initChainer) + app.MountStores(capKey, capKey2) + err = app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(1), app.LastBlockHeight()) + + // ensure we can still query after reloading + resQ, err = app.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, value, resQ.Value) + + // commit and ensure we can still query + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: app.LastBlockHeight() + 1}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + resQ, err = app.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, value, resQ.Value) +} + +func TestABCI_InitChain_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + _, err := app.InitChain( + &abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + require.NoError(t, err) + 
_, err = app.Commit() + require.NoError(t, err) + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +func TestABCI_FinalizeBlock_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + _, err := app.InitChain( + &abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + require.NoError(t, err) + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 4}) + require.Error(t, err, "invalid height: 4; expected: 3") + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 3}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +func TestABCI_FinalizeBlock_WithBeginAndEndBlocker(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + app.SetBeginBlocker(func(ctx sdk.Context) (sdk.BeginBlock, error) { + return sdk.BeginBlock{ + Events: []abci.Event{ + { + Type: "sometype", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: "bar", + }, + }, + }, + }, + }, nil + }) + + app.SetEndBlocker(func(ctx sdk.Context) (sdk.EndBlock, error) { + return sdk.EndBlock{ + Events: []abci.Event{ + { + Type: "anothertype", + Attributes: []abci.EventAttribute{ + { + Key: "foo", + Value: "bar", + }, + }, + }, + }, + }, nil + }) + + _, err := app.InitChain( + &abci.RequestInitChain{ + InitialHeight: 1, + }, + ) + require.NoError(t, err) + + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + + require.Len(t, res.Events, 2) + + require.Equal(t, "sometype", res.Events[0].Type) + require.Equal(t, "foo", res.Events[0].Attributes[0].Key) + require.Equal(t, "bar", res.Events[0].Attributes[0].Value) + require.Equal(t, "mode", res.Events[0].Attributes[1].Key) + require.Equal(t, "BeginBlock", res.Events[0].Attributes[1].Value) + + require.Equal(t, "anothertype", res.Events[1].Type) + 
require.Equal(t, "foo", res.Events[1].Attributes[0].Key) + require.Equal(t, "bar", res.Events[1].Attributes[0].Value) + require.Equal(t, "mode", res.Events[1].Attributes[1].Key) + require.Equal(t, "EndBlock", res.Events[1].Attributes[1].Value) + + _, err = app.Commit() + require.NoError(t, err) + + require.Equal(t, int64(1), app.LastBlockHeight()) +} + +func TestABCI_ExtendVote(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + app.SetExtendVoteHandler(func(ctx sdk.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + voteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10) + return &abci.ResponseExtendVote{VoteExtension: []byte(voteExt)}, nil + }) + + app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + // do some kind of verification here + expectedVoteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10) + if !bytes.Equal(req.VoteExtension, []byte(expectedVoteExt)) { + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil + } + + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil + }) + + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + _, err := app.InitChain( + &abci.RequestInitChain{ + InitialHeight: 1, + ConsensusParams: &cmtproto.ConsensusParams{ + Abci: &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: 200, + }, + }, + }, + ) + require.NoError(t, err) + + // Votes not enabled yet + _, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 123, Hash: []byte("thehash")}) + require.ErrorContains(t, err, "vote extensions are not enabled") + + // First vote on the first enabled height + res, err := app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 200, Hash: []byte("thehash")}) + require.NoError(t, err) 
+ require.Len(t, res.VoteExtension, 20) + + res, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 1000, Hash: []byte("thehash")}) + require.NoError(t, err) + require.Len(t, res.VoteExtension, 21) + + // Error during vote extension should return an empty vote extension and no error + app.SetExtendVoteHandler(func(ctx sdk.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + return nil, errors.New("some error") + }) + res, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 1000, Hash: []byte("thehash")}) + require.NoError(t, err) + require.Len(t, res.VoteExtension, 0) + + // Verify Vote Extensions + _, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 123, VoteExtension: []byte("1234567")}) + require.ErrorContains(t, err, "vote extensions are not enabled") + + // First vote on the first enabled height + vres, err := app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 200, Hash: []byte("thehash"), VoteExtension: []byte("foo74686568617368200")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status) + + vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 1000, Hash: []byte("thehash"), VoteExtension: []byte("foo746865686173681000")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status) + + // Reject because it's just some random bytes + vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status) + + // Reject because the verification failed (no error) + app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + return nil, errors.New("some error") + }) + vres, err = 
app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status) +} + +// TestABCI_OnlyVerifyVoteExtension makes sure we can call VerifyVoteExtension +// without having called ExtendVote before. +func TestABCI_OnlyVerifyVoteExtension(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + // do some kind of verification here + expectedVoteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10) + if !bytes.Equal(req.VoteExtension, []byte(expectedVoteExt)) { + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil + } + + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil + }) + + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + _, err := app.InitChain( + &abci.RequestInitChain{ + InitialHeight: 1, + ConsensusParams: &cmtproto.ConsensusParams{ + Abci: &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: 200, + }, + }, + }, + ) + require.NoError(t, err) + + // Verify Vote Extensions + _, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 123, VoteExtension: []byte("1234567")}) + require.ErrorContains(t, err, "vote extensions are not enabled") + + // First vote on the first enabled height + vres, err := app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 200, Hash: []byte("thehash"), VoteExtension: []byte("foo74686568617368200")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status) + + vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 1000, Hash: []byte("thehash"), VoteExtension: 
[]byte("foo746865686173681000")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status) + + // Reject because it's just some random bytes + vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status) + + // Reject because the verification failed (no error) + app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + return nil, errors.New("some error") + }) + vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")}) + require.NoError(t, err) + require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status) +} + +func TestABCI_GRPCQuery(t *testing.T) { + grpcQueryOpt := func(bapp *baseapp.BaseApp) { + testdata.RegisterQueryServer( + bapp.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + } + + suite := NewBaseAppSuite(t, grpcQueryOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + req := testdata.SayHelloRequest{Name: "foo"} + reqBz, err := req.Marshal() + require.NoError(t, err) + + resQuery, err := suite.baseApp.Query(context.TODO(), &abci.RequestQuery{ + Data: reqBz, + Path: "/testpb.Query/SayHello", + }) + require.NoError(t, err) + require.Equal(t, sdkerrors.ErrInvalidHeight.ABCICode(), resQuery.Code, resQuery) + require.Contains(t, resQuery.Log, "TestABCI_GRPCQuery is not ready; please wait for first block") + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: suite.baseApp.LastBlockHeight() + 1}) + require.NoError(t, err) + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + reqQuery := abci.RequestQuery{ + Data: reqBz, + Path: 
"/testpb.Query/SayHello", + } + + resQuery, err = suite.baseApp.Query(context.TODO(), &reqQuery) + require.NoError(t, err) + require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) + + var res testdata.SayHelloResponse + require.NoError(t, res.Unmarshal(resQuery.Value)) + require.Equal(t, "Hello foo!", res.Greeting) +} + +func TestABCI_P2PQuery(t *testing.T) { + addrPeerFilterOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAddrPeerFilter(func(addrport string) *abci.ResponseQuery { + require.Equal(t, "1.1.1.1:8000", addrport) + return &abci.ResponseQuery{Code: uint32(3)} + }) + } + + idPeerFilterOpt := func(bapp *baseapp.BaseApp) { + bapp.SetIDPeerFilter(func(id string) *abci.ResponseQuery { + require.Equal(t, "testid", id) + return &abci.ResponseQuery{Code: uint32(4)} + }) + } + + suite := NewBaseAppSuite(t, addrPeerFilterOpt, idPeerFilterOpt) + + addrQuery := abci.RequestQuery{ + Path: "/p2p/filter/addr/1.1.1.1:8000", + } + res, err := suite.baseApp.Query(context.TODO(), &addrQuery) + require.NoError(t, err) + require.Equal(t, uint32(3), res.Code) + + idQuery := abci.RequestQuery{ + Path: "/p2p/filter/id/testid", + } + res, err = suite.baseApp.Query(context.TODO(), &idQuery) + require.NoError(t, err) + require.Equal(t, uint32(4), res.Code) +} + +func TestBaseApp_PrepareCheckState(t *testing.T) { + db := dbm.NewMemDB() + name := t.Name() + logger := log.NewTestLogger(t) + + cp := &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 5000000, + }, + } + + app := baseapp.NewBaseApp(name, logger, db, nil) + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + _, err := app.InitChain(&abci.RequestInitChain{ + ConsensusParams: cp, + }) + require.NoError(t, err) + + wasPrepareCheckStateCalled := false + app.SetPrepareCheckStater(func(ctx sdk.Context) { + wasPrepareCheckStateCalled = true + }) + app.Seal() + + _, err = app.Commit() + require.NoError(t, err) + require.Equal(t, true, wasPrepareCheckStateCalled) +} + +func TestBaseApp_Precommit(t *testing.T) { + 
db := dbm.NewMemDB() + name := t.Name() + logger := log.NewTestLogger(t) + + cp := &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 5000000, + }, + } + + app := baseapp.NewBaseApp(name, logger, db, nil) + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + _, err := app.InitChain(&abci.RequestInitChain{ + ConsensusParams: cp, + }) + require.NoError(t, err) + + wasPrecommiterCalled := false + app.SetPrecommiter(func(ctx sdk.Context) { + wasPrecommiterCalled = true + }) + app.Seal() + + _, err = app.Commit() + require.NoError(t, err) + require.Equal(t, true, wasPrecommiterCalled) +} + +func TestABCI_CheckTx(t *testing.T) { + // This ante handler reads the key and checks that the value matches the + // current counter. This ensures changes to the KVStore persist across + // successive CheckTx runs. + counterKey := []byte("counter-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } + suite := NewBaseAppSuite(t, anteOpt) + + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, counterKey}) + + nTxs := int64(5) + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + for i := int64(0); i < nTxs; i++ { + tx := newTxCounter(t, suite.txConfig, i, 0) // no messages + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + r, err := suite.baseApp.CheckTx(&abci.RequestCheckTx{Tx: txBytes}) + require.NoError(t, err) + require.True(t, r.IsOK(), fmt.Sprintf("%v", r)) + require.Empty(t, r.GetEvents()) + } + + checkStateStore := getCheckStateCtx(suite.baseApp).KVStore(capKey1) + storedCounter := getIntFromStore(t, checkStateStore, counterKey) + + // ensure AnteHandler ran + require.Equal(t, nTxs, storedCounter) + + // if a block is committed, CheckTx state should be reset + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, 
+ Hash: []byte("hash"), + }) + require.NoError(t, err) + + require.NotNil(t, getCheckStateCtx(suite.baseApp).BlockGasMeter(), "block gas meter should have been set to checkState") + require.NotEmpty(t, getCheckStateCtx(suite.baseApp).HeaderHash()) + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + checkStateStore = getCheckStateCtx(suite.baseApp).KVStore(capKey1) + storedBytes := checkStateStore.Get(counterKey) + require.Nil(t, storedBytes) +} + +func TestABCI_FinalizeBlock_DeliverTx(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + suite := NewBaseAppSuite(t, anteOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := range nBlocks { + + txs := [][]byte{} + for i := range txPerHeight { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(t, suite.txConfig, counter, counter) + + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + txs = append(txs, txBytes) + } + + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: int64(blockN) + 1, + Txs: txs, + }) + require.NoError(t, err) + + for i := range txPerHeight { + counter := int64(blockN*txPerHeight + i) + require.True(t, res.TxResults[i].IsOK(), fmt.Sprintf("%v", res)) + + events := res.TxResults[i].GetEvents() + require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, 
counter).ToABCIEvents(), map[string]struct{}{})[0].Attributes[0], events[2].Attributes[0], "msg handler update counter event") + } + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + } +} + +func TestABCI_FinalizeBlock_MultiMsg(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + suite := NewBaseAppSuite(t, anteOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + deliverKey2 := []byte("deliver-key2") + baseapptestutil.RegisterCounter2Server(suite.baseApp.MsgServiceRouter(), Counter2ServerImpl{t, capKey1, deliverKey2}) + + // run a multi-msg tx + // with all msgs the same route + tx := newTxCounter(t, suite.txConfig, 0, 0, 1, 2) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + Txs: [][]byte{txBytes}, + }) + require.NoError(t, err) + + store := getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1) + + // tx counter only incremented once + txCounter := getIntFromStore(t, store, anteKey) + require.Equal(t, int64(1), txCounter) + + // msg counter incremented three times + msgCounter := getIntFromStore(t, store, deliverKey) + require.Equal(t, int64(3), msgCounter) + + // replace the second message with a Counter2 + tx = newTxCounter(t, suite.txConfig, 1, 3) + + builder := suite.txConfig.NewTxBuilder() + msgs := tx.GetMsgs() + _, _, addr := testdata.KeyTestPubAddr() + msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 0, Signer: addr.String()}) + msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 1, Signer: addr.String()}) + + require.NoError(t, builder.SetMsgs(msgs...)) + 
builder.SetMemo(tx.GetMemo()) + setTxSignature(t, builder, 0) + + txBytes, err = suite.txConfig.TxEncoder()(builder.GetTx()) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + Txs: [][]byte{txBytes}, + }) + require.NoError(t, err) + + store = getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1) + + // tx counter only incremented once + txCounter = getIntFromStore(t, store, anteKey) + require.Equal(t, int64(2), txCounter) + + // original counter increments by one + // new counter increments by two + msgCounter = getIntFromStore(t, store, deliverKey) + require.Equal(t, int64(4), msgCounter) + + msgCounter2 := getIntFromStore(t, store, deliverKey2) + require.Equal(t, int64(2), msgCounter2) +} + +func TestABCI_Query_SimulateTx(t *testing.T) { + gasConsumed := uint64(5) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasConsumed)) + return newCtx, err + }) + } + suite := NewBaseAppSuite(t, anteOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{gasConsumed}) + + nBlocks := 3 + for blockN := range nBlocks { + count := int64(blockN + 1) + + tx := newTxCounter(t, suite.txConfig, count, count) + + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.Nil(t, err) + + // simulate a message, check gas reported + gInfo, result, err := suite.baseApp.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate again, same result + gInfo, result, err = suite.baseApp.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate 
by calling Query with encoded tx + query := abci.RequestQuery{ + Path: "/app/simulate", + Data: txBytes, + } + queryResult, err := suite.baseApp.Query(context.TODO(), &query) + require.NoError(t, err) + require.True(t, queryResult.IsOK(), queryResult.Log) + + var simRes sdk.SimulationResponse + require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) + + require.Equal(t, gInfo, simRes.GasInfo) + require.Equal(t, result.Log, simRes.Result.Log) + require.Equal(t, result.Events, simRes.Result.Events) + require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: count}) + require.NoError(t, err) + _, err = suite.baseApp.Commit() + require.NoError(t, err) + } +} + +func TestABCI_InvalidTransaction(t *testing.T) { + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + return newCtx, err + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + }) + require.NoError(t, err) + + // malformed transaction bytes + { + bz := []byte("example vote extension") + result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + Txs: [][]byte{bz}, + }) + + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), result.TxResults[0].Codespace, err) + require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), result.TxResults[0].Code, err) + require.EqualValues(t, 0, result.TxResults[0].GasUsed, err) + require.EqualValues(t, 0, result.TxResults[0].GasWanted, err) + } + // transaction with no messages + { + emptyTx := 
suite.txConfig.NewTxBuilder().GetTx() + bz, err := suite.txConfig.TxEncoder()(emptyTx) + require.NoError(t, err) + result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + Txs: [][]byte{bz}, + }) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), result.TxResults[0].Codespace, err) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), result.TxResults[0].Code, err) + } + + // transaction where ValidateBasic fails + { + testCases := []struct { + tx signing.Tx + fail bool + }{ + {newTxCounter(t, suite.txConfig, 0, 0), false}, + {newTxCounter(t, suite.txConfig, -1, 0), false}, + {newTxCounter(t, suite.txConfig, 100, 100), false}, + {newTxCounter(t, suite.txConfig, 100, 5, 4, 3, 2, 1), false}, + + {newTxCounter(t, suite.txConfig, 0, -1), true}, + {newTxCounter(t, suite.txConfig, 0, 1, -2), true}, + {newTxCounter(t, suite.txConfig, 0, 1, 2, -10, 5), true}, + } + + for _, testCase := range testCases { + tx := testCase.tx + _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) + + if testCase.fail { + require.Error(t, err) + + space, code, _ := errorsmod.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) + } else { + require.NotNil(t, result) + } + } + } + + // transaction with no known route + { + txBuilder := suite.txConfig.NewTxBuilder() + _, _, addr := testdata.KeyTestPubAddr() + require.NoError(t, txBuilder.SetMsgs(&baseapptestutil.MsgCounter2{Signer: addr.String()})) + setTxSignature(t, txBuilder, 0) + unknownRouteTx := txBuilder.GetTx() + + _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := errorsmod.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), 
code, err) + + txBuilder = suite.txConfig.NewTxBuilder() + require.NoError(t, txBuilder.SetMsgs( + &baseapptestutil.MsgCounter{Signer: addr.String()}, + &baseapptestutil.MsgCounter2{Signer: addr.String()}, + )) + setTxSignature(t, txBuilder, 0) + unknownRouteTx = txBuilder.GetTx() + + _, result, err = suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ = errorsmod.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + } + + // Transaction with an unregistered message + { + txBuilder := suite.txConfig.NewTxBuilder() + require.NoError(t, txBuilder.SetMsgs(&testdata.MsgCreateDog{})) + tx := txBuilder.GetTx() + + _, _, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) + require.Error(t, err) + space, code, _ := errorsmod.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code) + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), space) + } +} + +func TestABCI_TxGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasGranted)) + + // AnteHandlers must have their own defer/recover in order for the BaseApp + // to know how much gas was used! This is because the GasMeter is created in + // the AnteHandler, but if it panics the context won't be set properly in + // runTx's recover call. 
+ defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case storetypes.ErrorOutOfGas: + err = errorsmod.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count, _ := parseTxMemo(t, tx) + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return newCtx, nil + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + }) + require.NoError(t, err) + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + testCases := []struct { + tx signing.Tx + gasUsed int64 + fail bool + }{ + {newTxCounter(t, suite.txConfig, 0, 0), 0, false}, + {newTxCounter(t, suite.txConfig, 1, 1), 2, false}, + {newTxCounter(t, suite.txConfig, 9, 1), 10, false}, + {newTxCounter(t, suite.txConfig, 1, 9), 10, false}, + {newTxCounter(t, suite.txConfig, 10, 0), 10, false}, + + {newTxCounter(t, suite.txConfig, 9, 2), 11, true}, + {newTxCounter(t, suite.txConfig, 2, 9), 11, true}, + // {newTxCounter(t, suite.txConfig, 9, 1, 1), 11, true}, + // {newTxCounter(t, suite.txConfig, 1, 8, 1, 1), 11, true}, + // {newTxCounter(t, suite.txConfig, 11, 0), 11, true}, + // {newTxCounter(t, suite.txConfig, 0, 11), 11, true}, + // {newTxCounter(t, suite.txConfig, 0, 5, 11), 16, true}, + } + + txs := [][]byte{} + for _, tc := range testCases { + tx := tc.tx + bz, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + txs = append(txs, bz) + } + + // Deliver the txs + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 2, + Txs: txs, + }) + + require.NoError(t, err) + + for i, tc := range testCases { + + result := res.TxResults[i] + + require.Equal(t, 
tc.gasUsed, result.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, result.GasUsed, result, err)) + + // check for out of gas + if !tc.fail { + require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) + } else { + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), result.Codespace, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), result.Code, err) + } + } +} + +func TestABCI_MaxBlockGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasGranted)) + + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case storetypes.ErrorOutOfGas: + err = errorsmod.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count, _ := parseTxMemo(t, tx) + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return newCtx, err + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 100, + }, + }, + }) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + + testCases := []struct { + tx signing.Tx + numDelivers int + gasUsedPerDeliver uint64 + fail bool + failAfterDeliver int + }{ + {newTxCounter(t, suite.txConfig, 0, 0), 0, 0, false, 0}, + {newTxCounter(t, suite.txConfig, 9, 1), 2, 10, false, 0}, + {newTxCounter(t, suite.txConfig, 10, 0), 3, 10, false, 0}, + {newTxCounter(t, suite.txConfig, 10, 0), 10, 10, false, 0}, + {newTxCounter(t, suite.txConfig, 2, 7), 11, 9, false, 0}, + // {newTxCounter(t, suite.txConfig, 10, 0), 10, 
10, false, 0}, // hit the limit but pass + + // {newTxCounter(t, suite.txConfig, 10, 0), 11, 10, true, 10}, + // {newTxCounter(t, suite.txConfig, 10, 0), 15, 10, true, 10}, + // {newTxCounter(t, suite.txConfig, 9, 0), 12, 9, true, 11}, // fly past the limit + } + + for i, tc := range testCases { + tx := tc.tx + + // reset block gas + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: suite.baseApp.LastBlockHeight() + 1}) + require.NoError(t, err) + + // execute the transaction multiple times + for j := range tc.numDelivers { + + _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) + + ctx := getFinalizeBlockStateCtx(suite.baseApp) + + // check for failed transactions + if tc.fail && (j+1) > tc.failAfterDeliver { + require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + require.Nil(t, tx, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) + + space, code, _ := errorsmod.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + require.True(t, ctx.BlockGasMeter().IsOutOfGas()) + } else { + // check gas used and wanted + blockGasUsed := ctx.BlockGasMeter().GasConsumed() + expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) + require.Equal( + t, expBlockGasUsed, blockGasUsed, + fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), + ) + + require.NotNil(t, tx, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) + require.False(t, ctx.BlockGasMeter().IsPastLimit()) + } + } + } +} + +func TestABCI_GasConsumptionBadTx(t *testing.T) { + gasWanted := uint64(5) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasWanted)) + + defer func() { + if r := recover(); r != nil { + switch 
rType := r.(type) { + case storetypes.ErrorOutOfGas: + log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) + err = errorsmod.Wrap(sdkerrors.ErrOutOfGas, log) + default: + panic(r) + } + } + }() + + counter, failOnAnte := parseTxMemo(t, tx) + newCtx.GasMeter().ConsumeGas(uint64(counter), "counter-ante") + if failOnAnte { + return newCtx, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + return newCtx, err + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 9, + }, + }, + }) + require.NoError(t, err) + + tx := newTxCounter(t, suite.txConfig, 5, 0) + tx = setFailOnAnte(t, suite.txConfig, tx, true) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + // require next tx to fail due to block gas limit + tx = newTxCounter(t, suite.txConfig, 5, 0) + txBytes2, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: suite.baseApp.LastBlockHeight() + 1, + Txs: [][]byte{txBytes, txBytes2}, + }) + require.NoError(t, err) +} + +func TestABCI_Query(t *testing.T) { + key, value := []byte("hello"), []byte("goodbye") + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + store := ctx.KVStore(capKey1) + store.Set(key, value) + return newCtx, err + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + // NOTE: "/store/key1" tells us KVStore + // and
the final "/key" says to use the data as the + // key in the given KVStore ... + query := abci.RequestQuery{ + Path: "/store/key1/key", + Data: key, + } + tx := newTxCounter(t, suite.txConfig, 0, 0) + + // query is empty before we do anything + res, err := suite.baseApp.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a CheckTx + _, resTx, err := suite.baseApp.SimCheck(suite.txConfig.TxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + + res, err = suite.baseApp.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, 0, len(res.Value)) + + bz, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: 1, + Txs: [][]byte{bz}, + }) + require.NoError(t, err) + + res, err = suite.baseApp.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, 0, len(res.Value)) + + // query returns correct value after Commit + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + res, err = suite.baseApp.Query(context.TODO(), &query) + require.NoError(t, err) + require.Equal(t, value, res.Value) +} + +func TestABCI_GetBlockRetentionHeight(t *testing.T) { + logger := log.NewTestLogger(t) + db := dbm.NewMemDB() + name := t.Name() + + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) + require.NoError(t, err) + + testCases := map[string]struct { + bapp *baseapp.BaseApp + maxAgeBlocks int64 + commitHeight int64 + expected int64 + }{ + "defaults": { + bapp: baseapp.NewBaseApp(name, logger, db, nil), + maxAgeBlocks: 0, + commitHeight: 499000, + expected: 0, + }, + "pruning unbonding time only": { + bapp: baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetMinRetainBlocks(1)), + maxAgeBlocks: 362880, + commitHeight: 499000, + expected: 136120, + }, + "pruning iavl snapshot only": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + 
baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), + baseapp.SetMinRetainBlocks(1), + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(10000, 1)), + ), + maxAgeBlocks: 0, + commitHeight: 499000, + expected: 489000, + }, + "pruning state sync snapshot only": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), + baseapp.SetMinRetainBlocks(1), + ), + maxAgeBlocks: 0, + commitHeight: 499000, + expected: 349000, + }, + "pruning min retention only": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + baseapp.SetMinRetainBlocks(400000), + ), + maxAgeBlocks: 0, + commitHeight: 499000, + expected: 99000, + }, + "pruning all conditions": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), + baseapp.SetMinRetainBlocks(400000), + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), + ), + maxAgeBlocks: 362880, + commitHeight: 499000, + expected: 99000, + }, + "no pruning due to no persisted state": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), + baseapp.SetMinRetainBlocks(400000), + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), + ), + maxAgeBlocks: 362880, + commitHeight: 10000, + expected: 0, + }, + "no pruning due to min retain blocks equal to commit height": { + bapp: baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetMinRetainBlocks(499000)), + maxAgeBlocks: 362880, + commitHeight: 499000, + expected: 0, + }, + "no pruning due to min retain blocks greater than commit height": { + bapp: baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetMinRetainBlocks(499001)), + maxAgeBlocks: 362880, + commitHeight: 499000, + expected: 0, + }, + "disable pruning": { + bapp: baseapp.NewBaseApp( + name, logger, db, nil, + 
baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), + baseapp.SetMinRetainBlocks(0), + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), + ), + maxAgeBlocks: 362880, + commitHeight: 499000, + expected: 0, + }, + } + + for name, tc := range testCases { + + tc.bapp.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + _, err = tc.bapp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{ + Evidence: &cmtproto.EvidenceParams{ + MaxAgeNumBlocks: tc.maxAgeBlocks, + }, + }, + }) + require.NoError(t, err) + + t.Run(name, func(t *testing.T) { + require.Equal(t, tc.expected, tc.bapp.GetBlockRetentionHeight(tc.commitHeight)) + }) + } +} + +// Verifies that PrepareCheckState is called with the checkState. +func TestPrepareCheckStateCalledWithCheckState(t *testing.T) { + t.Parallel() + + logger := log.NewTestLogger(t) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil) + + wasPrepareCheckStateCalled := false + app.SetPrepareCheckStater(func(ctx sdk.Context) { + require.Equal(t, true, ctx.IsCheckTx()) + wasPrepareCheckStateCalled = true + }) + + _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + require.Equal(t, true, wasPrepareCheckStateCalled) +} + +// Verifies that the Precommiter is called with the deliverState. 
+func TestPrecommiterCalledWithDeliverState(t *testing.T) { + t.Parallel() + + logger := log.NewTestLogger(t) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil) + + wasPrecommiterCalled := false + app.SetPrecommiter(func(ctx sdk.Context) { + require.Equal(t, false, ctx.IsCheckTx()) + require.Equal(t, false, ctx.IsReCheckTx()) + wasPrecommiterCalled = true + }) + + _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + require.Equal(t, true, wasPrecommiterCalled) +} + +func TestABCI_Proposal_HappyPath(t *testing.T) { + anteKey := []byte("ante-key") + pool := mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) + baseapptestutil.RegisterKeyValueServer(suite.baseApp.MsgServiceRouter(), MsgKeyValueImpl{}) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + tx := newTxCounter(t, suite.txConfig, 0, 1) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + reqCheckTx := abci.RequestCheckTx{ + Tx: txBytes, + Type: abci.CheckTxType_New, + } + _, err = suite.baseApp.CheckTx(&reqCheckTx) + require.NoError(t, err) + + tx2 := newTxCounter(t, suite.txConfig, 1, 1) + + tx2Bytes, err := suite.txConfig.TxEncoder()(tx2) + require.NoError(t, err) + + err = pool.Insert(sdk.Context{}, tx2) + require.NoError(t, err) + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 2, 
len(resPrepareProposal.Txs)) + + reqProposalTxBytes := [2][]byte{ + txBytes, + tx2Bytes, + } + reqProcessProposal := abci.RequestProcessProposal{ + Txs: reqProposalTxBytes[:], + Height: reqPrepareProposal.Height, + } + + resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) + require.NoError(t, err) + require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) + + // the same txs as in PrepareProposal + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: suite.baseApp.LastBlockHeight() + 1, + Txs: reqProposalTxBytes[:], + }) + require.NoError(t, err) + + require.Equal(t, 0, pool.CountTx()) + + require.NotEmpty(t, res.TxResults[0].Events) + require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) +} + +func TestABCI_Proposal_Read_State_PrepareProposal(t *testing.T) { + someKey := []byte("some-key") + + setInitChainerOpt := func(bapp *baseapp.BaseApp) { + bapp.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { + ctx.KVStore(capKey1).Set(someKey, []byte("foo")) + return &abci.ResponseInitChain{}, nil + }) + } + + prepareOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + value := ctx.KVStore(capKey1).Get(someKey) + // We should be able to access any state written in InitChain + require.Equal(t, "foo", string(value)) + return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil + }) + } + + suite := NewBaseAppSuite(t, setInitChainerOpt, prepareOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + InitialHeight: 1, + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, // this value can't be 0 + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + 
require.Equal(t, 0, len(resPrepareProposal.Txs)) + + reqProposalTxBytes := [][]byte{} + reqProcessProposal := abci.RequestProcessProposal{ + Txs: reqProposalTxBytes, + Height: reqPrepareProposal.Height, + } + + resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) + require.NoError(t, err) + require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) +} + +func TestABCI_Proposals_WithVE(t *testing.T) { + someVoteExtension := []byte("some-vote-extension") + + setInitChainerOpt := func(bapp *baseapp.BaseApp) { + bapp.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { + return &abci.ResponseInitChain{}, nil + }) + } + + prepareOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + // Inject the vote extension to the beginning of the proposal + txs := make([][]byte, len(req.Txs)+1) + txs[0] = someVoteExtension + copy(txs[1:], req.Txs) + + return &abci.ResponsePrepareProposal{Txs: txs}, nil + }) + + bapp.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + // Check that the vote extension is still there + require.Equal(t, someVoteExtension, req.Txs[0]) + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil + }) + } + + suite := NewBaseAppSuite(t, setInitChainerOpt, prepareOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + InitialHeight: 1, + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 100000, + Height: 1, // this value can't be 0 + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 1, len(resPrepareProposal.Txs)) + + reqProcessProposal := abci.RequestProcessProposal{ + Txs: 
resPrepareProposal.Txs, + Height: reqPrepareProposal.Height, + } + resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) + require.NoError(t, err) + require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) + + // Run finalize block and ensure that the vote extension is still there and that + // the proposal is accepted + result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Txs: resPrepareProposal.Txs, + Height: reqPrepareProposal.Height, + }) + require.NoError(t, err) + require.Equal(t, 1, len(result.TxResults)) + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), result.TxResults[0].Codespace, err) + require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), result.TxResults[0].Code, err) + require.EqualValues(t, 0, result.TxResults[0].GasUsed, err) + require.EqualValues(t, 0, result.TxResults[0].GasWanted, err) +} + +func TestABCI_PrepareProposal_ReachedMaxBytes(t *testing.T) { + anteKey := []byte("ante-key") + pool := mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + for i := range 100 { + tx2 := newTxCounter(t, suite.txConfig, int64(i), int64(i)) + err := pool.Insert(sdk.Context{}, tx2) + require.NoError(t, err) + } + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1500, + Height: 1, + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 8, len(resPrepareProposal.Txs)) +} + +func TestABCI_PrepareProposal_BadEncoding(t *testing.T) { + anteKey := []byte("ante-key") + pool 
:= mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + tx := newTxCounter(t, suite.txConfig, 0, 0) + err = pool.Insert(sdk.Context{}, tx) + require.NoError(t, err) + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 1, len(resPrepareProposal.Txs)) +} + +func TestABCI_PrepareProposal_OverGasUnderBytes(t *testing.T) { + pool := mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + suite := NewBaseAppSuite(t, baseapp.SetMempool(pool)) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + // set max block gas limit to 99, this will allow 9 txs of 10 gas each. + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{MaxGas: 99}, + }, + }) + + require.NoError(t, err) + // insert 100 txs, each with a gas limit of 10 + _, _, addr := testdata.KeyTestPubAddr() + for i := int64(0); i < 100; i++ { + msg := &baseapptestutil.MsgCounter{Counter: i, FailOnHandler: false, Signer: addr.String()} + msgs := []sdk.Msg{msg} + + builder := suite.txConfig.NewTxBuilder() + err = builder.SetMsgs(msgs...) 
+ require.NoError(t, err) + builder.SetMemo("counter=" + strconv.FormatInt(i, 10) + "&failOnAnte=false") + builder.SetGasLimit(10) + setTxSignature(t, builder, uint64(i)) + + err := pool.Insert(sdk.Context{}, builder.GetTx()) + require.NoError(t, err) + } + + // ensure we only select transactions that fit within the block gas limit + res, err := suite.baseApp.PrepareProposal(&abci.RequestPrepareProposal{ + MaxTxBytes: 1_000_000, // large enough to ignore restriction + Height: 1, + }) + require.NoError(t, err) + + // Should include 9 transactions + require.Len(t, res.Txs, 9, "invalid number of transactions returned") +} + +func TestABCI_PrepareProposal_MaxGas(t *testing.T) { + pool := mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + suite := NewBaseAppSuite(t, baseapp.SetMempool(pool)) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + // set max block gas limit to 100 + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{MaxGas: 100}, + }, + }) + require.NoError(t, err) + + // insert 100 txs, each with a gas limit of 10 + _, _, addr := testdata.KeyTestPubAddr() + for i := int64(0); i < 100; i++ { + msg := &baseapptestutil.MsgCounter{Counter: i, FailOnHandler: false, Signer: addr.String()} + msgs := []sdk.Msg{msg} + + builder := suite.txConfig.NewTxBuilder() + require.NoError(t, builder.SetMsgs(msgs...)) + builder.SetMemo("counter=" + strconv.FormatInt(i, 10) + "&failOnAnte=false") + builder.SetGasLimit(10) + setTxSignature(t, builder, uint64(i)) + + err := pool.Insert(sdk.Context{}, builder.GetTx()) + require.NoError(t, err) + } + + // ensure we only select transactions that fit within the block gas limit + res, err := suite.baseApp.PrepareProposal(&abci.RequestPrepareProposal{ + MaxTxBytes: 1_000_000, // large enough to ignore restriction + Height: 1, + }) + require.NoError(t, err) + require.Len(t, res.Txs, 10, 
"invalid number of transactions returned") +} + +func TestABCI_PrepareProposal_Failures(t *testing.T) { + anteKey := []byte("ante-key") + pool := mempool.NewSenderNonceMempool(mempool.SenderNonceMaxTxOpt(5000)) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + tx := newTxCounter(t, suite.txConfig, 0, 0) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + reqCheckTx := abci.RequestCheckTx{ + Tx: txBytes, + Type: abci.CheckTxType_New, + } + checkTxRes, err := suite.baseApp.CheckTx(&reqCheckTx) + require.NoError(t, err) + require.True(t, checkTxRes.IsOK()) + + failTx := newTxCounter(t, suite.txConfig, 1, 1) + failTx = setFailOnAnte(t, suite.txConfig, failTx, true) + + err = pool.Insert(sdk.Context{}, failTx) + require.NoError(t, err) + require.Equal(t, 2, pool.CountTx()) + + req := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, + } + res, err := suite.baseApp.PrepareProposal(&req) + require.NoError(t, err) + require.Equal(t, 1, len(res.Txs)) +} + +func TestABCI_PrepareProposal_PanicRecovery(t *testing.T) { + prepareOpt := func(app *baseapp.BaseApp) { + app.SetPrepareProposal(func(ctx sdk.Context, rpp *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + panic(errors.New("test")) + }) + } + suite := NewBaseAppSuite(t, prepareOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + req := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, + } + + require.NotPanics(t, func() { + res, err := suite.baseApp.PrepareProposal(&req) + require.NoError(t, 
err) + require.Equal(t, req.Txs, res.Txs) + }) +} + +func TestABCI_PrepareProposal_VoteExtensions(t *testing.T) { + // set up mocks + ctrl := gomock.NewController(t) + valStore := mock.NewMockValidatorStore(ctrl) + privkey := secp256k1.GenPrivKey() + pubkey := privkey.PubKey() + addr := sdk.AccAddress(pubkey.Address()) + tmPk := cmtprotocrypto.PublicKey{ + Sum: &cmtprotocrypto.PublicKey_Secp256K1{ + Secp256K1: pubkey.Bytes(), + }, + } + + consAddr := sdk.ConsAddress(addr.String()) + valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), consAddr.Bytes()).Return(tmPk, nil) + + // set up baseapp + prepareOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + ctx = ctx.WithBlockHeight(req.Height).WithChainID(bapp.ChainID()) + _, info := extendedCommitToLastCommit(req.LocalLastCommit) + ctx = ctx.WithCometInfo(info) + err := baseapp.ValidateVoteExtensions(ctx, valStore, 0, "", req.LocalLastCommit) + if err != nil { + return nil, err + } + + cp := ctx.ConsensusParams() + extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 + if extsEnabled { + req.Txs = append(req.Txs, []byte("some-tx-that-does-something-from-votes")) + } + return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil + }) + } + + suite := NewBaseAppSuite(t, prepareOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + InitialHeight: 1, + ConsensusParams: &cmtproto.ConsensusParams{ + Abci: &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: 2, + }, + }, + }) + require.NoError(t, err) + + // first test without vote extensions, no new txs should be added + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 1, // this value can't be 0 + } + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 0, len(resPrepareProposal.Txs)) + + 
// now we try with vote extensions, a new tx should show up + marshalDelimitedFn := func(msg proto.Message) ([]byte, error) { + var buf bytes.Buffer + if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + ext := []byte("something") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, // the vote extension was signed in the previous height + Round: int64(0), + ChainId: suite.baseApp.ChainID(), + } + + bz, err := marshalDelimitedFn(&cve) + require.NoError(t, err) + + extSig, err := privkey.Sign(bz) + require.NoError(t, err) + + reqPrepareProposal = abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 3, // this value can't be 0 + LocalLastCommit: abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + { + Validator: abci.Validator{ + Address: consAddr.Bytes(), + Power: 666, + }, + VoteExtension: ext, + ExtensionSignature: extSig, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + }, + }, + } + resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 1, len(resPrepareProposal.Txs)) + + // now vote extensions but our sole voter doesn't reach majority + reqPrepareProposal = abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 3, // this value can't be 0 + LocalLastCommit: abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + { + Validator: abci.Validator{ + Address: consAddr.Bytes(), + Power: 666, + }, + VoteExtension: ext, + ExtensionSignature: extSig, + BlockIdFlag: cmtproto.BlockIDFlagNil, // This will ignore the vote extension + }, + }, + }, + } + resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 0, len(resPrepareProposal.Txs)) +} + +func TestABCI_ProcessProposal_PanicRecovery(t *testing.T) { + processOpt := func(app *baseapp.BaseApp) { + app.SetProcessProposal(func(ctx sdk.Context, rpp 
*abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + panic(errors.New("test")) + }) + } + suite := NewBaseAppSuite(t, processOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + require.NotPanics(t, func() { + res, err := suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 1}) + require.NoError(t, err) + require.Equal(t, res.Status, abci.ResponseProcessProposal_REJECT) + }) +} + +// TestABCI_Proposal_Reset_State ensures that state is reset between runs of +// PrepareProposal and ProcessProposal in case they are called multiple times. +// This is only valid for heights > 1, given that on height 1 we always set the +// state to be deliverState. +func TestABCI_Proposal_Reset_State_Between_Calls(t *testing.T) { + someKey := []byte("some-key") + + prepareOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + // This key should not exist given that we reset the state on every call. + require.False(t, ctx.KVStore(capKey1).Has(someKey)) + ctx.KVStore(capKey1).Set(someKey, someKey) + return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil + }) + } + + processOpt := func(bapp *baseapp.BaseApp) { + bapp.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + // This key should not exist given that we reset the state on every call. 
+ require.False(t, ctx.KVStore(capKey1).Has(someKey)) + ctx.KVStore(capKey1).Set(someKey, someKey) + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil + }) + } + + suite := NewBaseAppSuite(t, prepareOpt, processOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + reqPrepareProposal := abci.RequestPrepareProposal{ + MaxTxBytes: 1000, + Height: 2, // this value can't be 0 + } + + // Let's pretend something happened and PrepareProposal gets called many + // times, this must be safe to do. + for range 5 { + resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) + require.NoError(t, err) + require.Equal(t, 0, len(resPrepareProposal.Txs)) + } + + reqProposalTxBytes := [][]byte{} + reqProcessProposal := abci.RequestProcessProposal{ + Txs: reqProposalTxBytes, + Height: 2, + } + + // Let's pretend something happened and ProcessProposal gets called many + // times, this must be safe to do. 
	// Repeated ProcessProposal calls on the same request must be idempotent.
	for range 5 {
		resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal)
		require.NoError(t, err)
		require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status)
	}
}

// TestABCI_HaltChain verifies that FinalizeBlock errors with a
// "halt per configuration" message once the configured halt height or halt
// time is reached, and succeeds below those thresholds.
func TestABCI_HaltChain(t *testing.T) {
	testCases := []struct {
		name        string
		haltHeight  uint64
		haltTime    uint64
		blockHeight int64
		blockTime   int64
		expHalt     bool
	}{
		{"default", 0, 0, 10, 0, false},
		{"halt-height-edge", 11, 0, 10, 0, false},
		{"halt-height-equal", 10, 0, 10, 0, true},
		{"halt-height", 10, 0, 10, 0, true},
		{"halt-time-edge", 0, 11, 1, 10, false},
		{"halt-time-equal", 0, 10, 1, 10, true},
		{"halt-time", 0, 10, 1, 11, true},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			suite := NewBaseAppSuite(t, baseapp.SetHaltHeight(tc.haltHeight), baseapp.SetHaltTime(tc.haltTime))
			_, err := suite.baseApp.InitChain(&abci.RequestInitChain{
				ConsensusParams: &cmtproto.ConsensusParams{},
				InitialHeight:   tc.blockHeight,
			})
			require.NoError(t, err)

			app := suite.baseApp
			_, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{
				Height: tc.blockHeight,
				Time:   time.Unix(tc.blockTime, 0),
			})
			if !tc.expHalt {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				// the halt error is produced by BaseApp when height/time thresholds trip
				require.True(t, strings.HasPrefix(err.Error(), "halt per configuration"))
			}
		})
	}
}

// TestBaseApp_PreBlocker checks that a registered PreBlocker hook runs during
// FinalizeBlock, that events it emits surface in the block response, and that
// a PreBlocker error aborts FinalizeBlock.
func TestBaseApp_PreBlocker(t *testing.T) {
	db := dbm.NewMemDB()
	name := t.Name()
	logger := log.NewTestLogger(t)

	app := baseapp.NewBaseApp(name, logger, db, nil)
	_, err := app.InitChain(&abci.RequestInitChain{})
	require.NoError(t, err)

	wasHookCalled := false
	app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
		wasHookCalled = true

		ctx.EventManager().EmitEvent(sdk.NewEvent("preblockertest", sdk.NewAttribute("height", fmt.Sprintf("%d", req.Height))))
		return &sdk.ResponsePreBlock{ConsensusParamsChanged: false}, nil
	})
	app.Seal()

	res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1})
	require.NoError(t, err)
	require.Equal(t, true, wasHookCalled)
	require.Len(t, res.Events, 1)
	require.Equal(t, "preblockertest", res.Events[0].Type)

	// Now try erroring
	app = baseapp.NewBaseApp(name, logger, db, nil)
	_, err = app.InitChain(&abci.RequestInitChain{})
	require.NoError(t, err)

	app.SetPreBlocker(func(_ sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
		return nil, errors.New("some error")
	})
	app.Seal()

	_, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1})
	require.Error(t, err)
}

// TestBaseApp_VoteExtensions tests vote extensions using a price as an example.
func TestBaseApp_VoteExtensions(t *testing.T) {
	ctrl := gomock.NewController(t)
	valStore := mock.NewMockValidatorStore(ctrl)

	// 10 good vote extensions, 2 bad ones from 12 total validators
	numVals := 12
	privKeys := make([]secp256k1.PrivKey, numVals)
	vals := make([]sdk.ConsAddress, numVals)
	for i := range numVals {
		privKey := secp256k1.GenPrivKey()
		privKeys[i] = privKey

		pubKey := privKey.PubKey()
		val := sdk.ConsAddress(pubKey.Bytes())
		vals[i] = val

		tmPk := cmtprotocrypto.PublicKey{
			Sum: &cmtprotocrypto.PublicKey_Secp256K1{
				Secp256K1: pubKey.Bytes(),
			},
		}
		valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), val).Return(tmPk, nil)
	}

	baseappOpts := func(app *baseapp.BaseApp) {
		app.SetExtendVoteHandler(func(sdk.Context, *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) {
			// here we would have a process to get the price from an external source
			price := 10000000 + rand.Int63n(1000000)
			ve := make([]byte, 8)
			binary.BigEndian.PutUint64(ve, uint64(price))
			return &abci.ResponseExtendVote{VoteExtension: ve}, nil
		})

		app.SetVerifyVoteExtensionHandler(func(_ sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
			vePrice := binary.BigEndian.Uint64(req.VoteExtension)
			// here we would do some price validation, must not be 0 and not too high
			if vePrice > 11000000 || vePrice == 0 {
				// usually application should always return ACCEPT unless they really want to discard the entire vote
				return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil
			}

			return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil
		})

		app.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
			txs := [][]byte{}
			ctx = ctx.WithBlockHeight(req.Height).WithChainID(app.ChainID())
			_, info := extendedCommitToLastCommit(req.LocalLastCommit)
			ctx = ctx.WithCometInfo(info)
			if err := baseapp.ValidateVoteExtensions(ctx, valStore, 0, "", req.LocalLastCommit); err != nil {
				return nil, err
			}
			// add all VE as txs (in a real scenario we would need to check signatures too)
			for _, v := range req.LocalLastCommit.Votes {
				if len(v.VoteExtension) == 8 {
					// pretend this is a way to check if the VE is valid
					if binary.BigEndian.Uint64(v.VoteExtension) < 11000000 && binary.BigEndian.Uint64(v.VoteExtension) > 0 {
						txs = append(txs, v.VoteExtension)
					}
				}
			}

			return &abci.ResponsePrepareProposal{Txs: txs}, nil
		})

		app.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
			// here we check if the proposal is valid, mainly if the vote extensions appended to the txs are valid
			for _, v := range req.Txs {
				// pretend this is a way to check if the tx is actually a VE
				if len(v) == 8 {
					// pretend this is a way to check if the VE is valid
					if binary.BigEndian.Uint64(v) > 11000000 || binary.BigEndian.Uint64(v) == 0 {
						return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil
					}
				}
			}

			return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil
		})

		app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) {
			count := uint64(0)
			pricesSum := uint64(0)
			for _, v := range req.Txs {
				// pretend this is a way to check if the tx is actually a VE
				if len(v) == 8 {
					count++
					pricesSum += binary.BigEndian.Uint64(v)
				}
			}

			if count > 0 {
				// we process the average price and store it in the context to make it available for FinalizeBlock
				avgPrice := pricesSum / count
				buf := make([]byte, 8)
				binary.BigEndian.PutUint64(buf, avgPrice)
				ctx.KVStore(capKey1).Set([]byte("avgPrice"), buf)
			}

			return &sdk.ResponsePreBlock{
				ConsensusParamsChanged: true,
			}, nil
		})
	}

	suite := NewBaseAppSuite(t, baseappOpts)

	_, err := suite.baseApp.InitChain(&abci.RequestInitChain{
		ConsensusParams: &cmtproto.ConsensusParams{
			Abci: &cmtproto.ABCIParams{
				VoteExtensionsEnableHeight: 1,
			},
		},
	})
	require.NoError(t, err)

	allVEs := [][]byte{}
	// simulate getting 10 vote extensions from 10 validators
	for range 10 {
		ve, err := suite.baseApp.ExtendVote(context.TODO(), &abci.RequestExtendVote{Height: 1})
		require.NoError(t, err)
		allVEs = append(allVEs, ve.VoteExtension)
	}

	// add a couple of invalid vote extensions (in what regards to the check we are doing in VerifyVoteExtension/ProcessProposal)
	// add a 0 price
	ve := make([]byte, 8)
	binary.BigEndian.PutUint64(ve, uint64(0))
	allVEs = append(allVEs, ve)

	// add a price too high
	ve = make([]byte, 8)
	binary.BigEndian.PutUint64(ve, uint64(13000000))
	allVEs = append(allVEs, ve)

	// verify all votes, only 10 should be accepted
	successful := 0
	for _, v := range allVEs {
		res, err := suite.baseApp.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{
			Height:        1,
			VoteExtension: v,
		})
		require.NoError(t, err)
		if res.Status == abci.ResponseVerifyVoteExtension_ACCEPT {
			successful++
		}
	}
	require.Equal(t, 10, successful)

	extVotes := []abci.ExtendedVoteInfo{}
	for _, val := range vals {
		extVotes = append(extVotes, abci.ExtendedVoteInfo{
			VoteExtension:      allVEs[0],
			BlockIdFlag:        cmtproto.BlockIDFlagCommit,
			ExtensionSignature: []byte{},
			Validator: abci.Validator{
				Address: val.Bytes(),
				Power:   666,
			},
		},
		)
	}

	prepPropReq := &abci.RequestPrepareProposal{
		Height: 1,
		LocalLastCommit: abci.ExtendedCommitInfo{
			Round: 0,
			Votes: extVotes,
		},
	}

	// add all VEs to the local last commit, which will make PrepareProposal fail
	// because it's not expecting to receive vote extensions when height == VoteExtensionsEnableHeight
	for _, ve := range allVEs {
		prepPropReq.LocalLastCommit.Votes = append(prepPropReq.LocalLastCommit.Votes, abci.ExtendedVoteInfo{
			VoteExtension:      ve,
			BlockIdFlag:        cmtproto.BlockIDFlagCommit,
			ExtensionSignature: []byte{}, // doesn't matter, it's just to make the next PrepareProposal fail
		})
	}
	resp, err := suite.baseApp.PrepareProposal(prepPropReq)
	require.Len(t, resp.Txs, 0) // this is actually a failure, but we don't want to halt the chain
	require.NoError(t, err)     // we don't error here

	prepPropReq.LocalLastCommit.Votes = []abci.ExtendedVoteInfo{} // reset votes
	resp, err = suite.baseApp.PrepareProposal(prepPropReq)
	require.NoError(t, err)
	require.Len(t, resp.Txs, 0)

	procPropRes, err := suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 1, Txs: resp.Txs})
	require.NoError(t, err)
	require.Equal(t, abci.ResponseProcessProposal_ACCEPT, procPropRes.Status)

	_, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: resp.Txs})
	require.NoError(t, err)

	// The average price will be nil during the first block, given that we don't have
	// any vote extensions on block 1 in PrepareProposal
	avgPrice := getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1).Get([]byte("avgPrice"))
	require.Nil(t, avgPrice)
	_, err = suite.baseApp.Commit()
	require.NoError(t, err)

	// Now onto the second block, this time we process vote extensions from the
	// previous block (which we sign now)
	for i, ve := range allVEs {
		cve := cmtproto.CanonicalVoteExtension{
			Extension: ve,
			Height:    1,
			Round:     int64(0),
			ChainId:   suite.baseApp.ChainID(),
		}

		bz, err := marshalDelimitedFn(&cve)
		require.NoError(t, err)

		privKey := privKeys[i]
		extSig, err := privKey.Sign(bz)
		require.NoError(t, err)

		prepPropReq.LocalLastCommit.Votes = append(prepPropReq.LocalLastCommit.Votes, abci.ExtendedVoteInfo{
			VoteExtension:      ve,
			BlockIdFlag:        cmtproto.BlockIDFlagCommit,
			ExtensionSignature: extSig,
			Validator: abci.Validator{
				Address: vals[i].Bytes(),
				Power:   666,
			},
		})
	}

	prepPropReq.Height = 2
	resp, err = suite.baseApp.PrepareProposal(prepPropReq)
	require.NoError(t, err)
	require.Len(t, resp.Txs, 10)

	procPropRes, err = suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 2, Txs: resp.Txs})
	require.NoError(t, err)
	require.Equal(t, abci.ResponseProcessProposal_ACCEPT, procPropRes.Status)

	_, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2, Txs: resp.Txs})
	require.NoError(t, err)

	// Check if the average price was available in FinalizeBlock's context
	avgPrice = getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1).Get([]byte("avgPrice"))
	require.NotNil(t, avgPrice)
	require.GreaterOrEqual(t, binary.BigEndian.Uint64(avgPrice), uint64(10000000))
	require.Less(t, binary.BigEndian.Uint64(avgPrice), uint64(11000000))

	_, err = suite.baseApp.Commit()
	require.NoError(t, err)

	// check if avgPrice was committed
	committedAvgPrice := suite.baseApp.NewContext(true).KVStore(capKey1).Get([]byte("avgPrice"))
	require.Equal(t, avgPrice, committedAvgPrice)
}

// TestABCI_PrepareProposal_Panic checks BaseApp's panic recovery in
// PrepareProposal: a panicking handler must not crash the node; the request's
// txs are echoed back instead.
func TestABCI_PrepareProposal_Panic(t *testing.T) {
	prepareOpt := func(bapp *baseapp.BaseApp) {
		bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
			if len(req.Txs) == 3 {
				panic("i don't like number 3, panic")
			}
			// return empty if no panic
			return &abci.ResponsePrepareProposal{}, nil
		})
	}

	suite := NewBaseAppSuite(t, prepareOpt)

	_, err := suite.baseApp.InitChain(&abci.RequestInitChain{
		InitialHeight:   1,
		ConsensusParams: &cmtproto.ConsensusParams{},
	})
	require.NoError(t, err)

	txs := [][]byte{{1}, {2}}
	reqPrepareProposal := abci.RequestPrepareProposal{
		MaxTxBytes: 1000,
		Height:     1, // this value can't be 0
		Txs:        txs,
	}
	resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal)
	require.NoError(t, err)
	require.Equal(t, 0, len(resPrepareProposal.Txs))

	// make it panic, and check if it returns 3 txs (because of panic recovery)
	txs = [][]byte{{1}, {2}, {3}}
	reqPrepareProposal.Txs = txs
	resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal)
	require.NoError(t, err)
	require.Equal(t, 3, len(resPrepareProposal.Txs))
}

// TestOptimisticExecution runs 50 ProcessProposal/FinalizeBlock/Commit rounds
// with optimistic execution enabled and checks the chain height advances.
func TestOptimisticExecution(t *testing.T) {
	suite := NewBaseAppSuite(t, baseapp.SetOptimisticExecution())

	_, err := suite.baseApp.InitChain(&abci.RequestInitChain{
		ConsensusParams: &cmtproto.ConsensusParams{},
	})
	require.NoError(t, err)

	// run 50 blocks
	for range 50 {
		tx := newTxCounter(t, suite.txConfig, 0, 1)
		txBytes, err := suite.txConfig.TxEncoder()(tx)
		require.NoError(t, err)

		reqProcProp := abci.RequestProcessProposal{
			Txs:    [][]byte{txBytes},
			Height: suite.baseApp.LastBlockHeight() + 1,
			Hash:   []byte("some-hash" + strconv.FormatInt(suite.baseApp.LastBlockHeight()+1, 10)),
		}

		respProcProp, err := suite.baseApp.ProcessProposal(&reqProcProp)
		require.Equal(t, abci.ResponseProcessProposal_ACCEPT, respProcProp.Status)
		require.NoError(t, err)

		reqFinalizeBlock := abci.RequestFinalizeBlock{
			Height: reqProcProp.Height,
			Txs:    reqProcProp.Txs,
			Hash:   reqProcProp.Hash,
		}

		respFinalizeBlock, err := suite.baseApp.FinalizeBlock(&reqFinalizeBlock)
		require.NoError(t, err)
		require.Len(t, respFinalizeBlock.TxResults, 1)

		_, err = suite.baseApp.Commit()
		require.NoError(t, err)
	}

	require.Equal(t, int64(50), suite.baseApp.LastBlockHeight())
}

// TestABCI_Proposal_FailReCheckTx verifies that a tx failing ReCheckTx is
// evicted from the mempool and excluded from subsequent proposals.
func TestABCI_Proposal_FailReCheckTx(t *testing.T) {
	pool := mempool.NewPriorityMempool[int64](mempool.PriorityNonceMempoolConfig[int64]{
		TxPriority:      mempool.NewDefaultTxPriority(),
		MaxTx:           0,
		SignerExtractor: mempool.NewDefaultSignerExtractionAdapter(),
	})

	anteOpt := func(bapp *baseapp.BaseApp) {
		bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) {
			// always fail on recheck, just to test the recheck logic
			if ctx.IsReCheckTx() {
				return ctx, errors.New("recheck failed in ante handler")
			}

			return ctx, nil
		})
	}

	suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool))
	baseapptestutil.RegisterKeyValueServer(suite.baseApp.MsgServiceRouter(), MsgKeyValueImpl{})
	baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{})

	_, err := suite.baseApp.InitChain(&abci.RequestInitChain{
		ConsensusParams: &cmtproto.ConsensusParams{},
	})
	require.NoError(t, err)

	tx := newTxCounter(t, suite.txConfig, 0, 1)
	txBytes, err := suite.txConfig.TxEncoder()(tx)
	require.NoError(t, err)

	reqCheckTx := abci.RequestCheckTx{
		Tx:   txBytes,
		Type: abci.CheckTxType_New,
	}
	_, err = suite.baseApp.CheckTx(&reqCheckTx)
	require.NoError(t, err)

	tx2 := newTxCounter(t, suite.txConfig, 1, 1)

	tx2Bytes, err := suite.txConfig.TxEncoder()(tx2)
	require.NoError(t, err)

	err = pool.Insert(sdk.Context{}, tx2)
	require.NoError(t, err)

	require.Equal(t, 2, pool.CountTx())

	// call prepareProposal before calling recheck tx, just as a sanity check
	reqPrepareProposal := abci.RequestPrepareProposal{
		MaxTxBytes: 1000,
		Height:     1,
	}
	resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal)
	require.NoError(t, err)
	require.Equal(t, 2, len(resPrepareProposal.Txs))

	// call recheck on the first tx, it MUST return an error
	reqReCheckTx := abci.RequestCheckTx{
		Tx:   txBytes,
		Type: abci.CheckTxType_Recheck,
	}
	resp, err := suite.baseApp.CheckTx(&reqReCheckTx)
	require.NoError(t, err)
	require.True(t, resp.IsErr())
	require.Equal(t, "recheck failed in ante handler", resp.Log)

	// call prepareProposal again, should return only the second tx
	resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal)
	require.NoError(t, err)
	require.Equal(t, 1, len(resPrepareProposal.Txs))
	require.Equal(t, tx2Bytes, resPrepareProposal.Txs[0])

	// check the mempool, it should have only the second tx
	require.Equal(t, 1, pool.CountTx())

	reqProposalTxBytes := [][]byte{
		tx2Bytes,
	}
	reqProcessProposal := abci.RequestProcessProposal{
		Txs:    reqProposalTxBytes,
		Height: reqPrepareProposal.Height,
	}

	resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal)
	require.NoError(t, err)
	require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status)

	// the same txs as in PrepareProposal
	res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{
		Height: suite.baseApp.LastBlockHeight() + 1,
		Txs:    reqProposalTxBytes,
	})
	require.NoError(t, err)

	require.Equal(t, 0, pool.CountTx())

	require.NotEmpty(t, res.TxResults[0].Events)
	require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res))
}

// TestFinalizeBlockDeferResponseHandle checks that when FinalizeBlock errors
// (halt height reached) the deferred streaming/response handling does not
// panic and an empty response plus an error are returned.
func TestFinalizeBlockDeferResponseHandle(t *testing.T) {
	suite := NewBaseAppSuite(t, baseapp.SetHaltHeight(1), func(ba *baseapp.BaseApp) {
		ba.SetStreamingManager(storetypes.StreamingManager{
			ABCIListeners: []storetypes.ABCIListener{
				&mockABCIListener{},
			},
		})
	})

	res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{
		Height: 2,
	})
	require.Empty(t, res)
	require.NotEmpty(t, err)
}
diff --git a/baseapp/abci_utils.go b/baseapp/abci_utils.go
new file mode 100644
index 0000000..c2921b8
--- /dev/null
+++ b/baseapp/abci_utils.go
@@ -0,0 +1,533 @@
// Package baseapp: ABCI helper utilities for vote-extension validation and the
// default PrepareProposal/ProcessProposal handlers.
package baseapp

import (
	"bytes"
	"context"
	"fmt"
	"slices"

	"github.com/cockroachdb/errors"
	abci "github.com/cometbft/cometbft/abci/types"
	cryptoenc "github.com/cometbft/cometbft/crypto/encoding"
	cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
	cmttypes "github.com/cometbft/cometbft/types"
	protoio "github.com/cosmos/gogoproto/io"
	"github.com/cosmos/gogoproto/proto"

	"cosmossdk.io/core/comet"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/mempool"
)

type (
	// ValidatorStore defines the interface contract required for verifying vote
	// extension signatures. Typically, this will be implemented by the x/staking
	// module, which has knowledge of the CometBFT public key.
	ValidatorStore interface {
		GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error)
	}

	// GasTx defines the contract that a transaction with a gas limit must implement.
	GasTx interface {
		GetGas() uint64
	}
)

// ValidateVoteExtensions defines a helper function for verifying vote extension
// signatures that may be passed or manually injected into a block proposal from
// a proposer in PrepareProposal. It returns an error if any signature is invalid
// or if unexpected vote extensions and/or signatures are found or less than 2/3
// power is received.
// NOTE: From v0.50.5 `currentHeight` and `chainID` arguments are ignored for fixing an issue.
// They will be removed from the function in v0.51+.
+func ValidateVoteExtensions( + ctx sdk.Context, + valStore ValidatorStore, + _ int64, + _ string, + extCommit abci.ExtendedCommitInfo, +) error { + // Get values from context + cp := ctx.ConsensusParams() + currentHeight := ctx.HeaderInfo().Height + chainID := ctx.HeaderInfo().ChainID + commitInfo := ctx.CometInfo().GetLastCommit() + + // Check that both extCommit + commit are ordered in accordance with vp/address. + if err := validateExtendedCommitAgainstLastCommit(extCommit, commitInfo); err != nil { + return err + } + + // Start checking vote extensions only **after** the vote extensions enable + // height, because when `currentHeight == VoteExtensionsEnableHeight` + // PrepareProposal doesn't get any vote extensions in its request. + extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 + marshalDelimitedFn := func(msg proto.Message) ([]byte, error) { + var buf bytes.Buffer + if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + var ( + // Total voting power of all vote extensions. + totalVP int64 + // Total voting power of all validators that submitted valid vote extensions. + sumVP int64 + ) + + for _, vote := range extCommit.Votes { + totalVP += vote.Validator.Power + + // Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the + // previous block (the block the vote is for) could not have been committed. 
+ if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit { + continue + } + + if !extsEnabled { + if len(vote.VoteExtension) > 0 { + return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight) + } + if len(vote.ExtensionSignature) > 0 { + return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight) + } + + continue + } + + if len(vote.ExtensionSignature) == 0 { + return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight) + } + + valConsAddr := sdk.ConsAddress(vote.Validator.Address) + + pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr) + if err != nil { + return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err) + } + + cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto) + if err != nil { + return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err) + } + + cve := cmtproto.CanonicalVoteExtension{ + Extension: vote.VoteExtension, + Height: currentHeight - 1, // the vote extension was signed in the previous height + Round: int64(extCommit.Round), + ChainId: chainID, + } + + extSignBytes, err := marshalDelimitedFn(&cve) + if err != nil { + return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err) + } + + if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { + return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr) + } + + sumVP += vote.Validator.Power + } + + // This check is probably unnecessary, but better safe than sorry. + if totalVP <= 0 { + return fmt.Errorf("total voting power must be positive, got: %d", totalVP) + } + + // If the sum of the voting power has not reached (2/3 + 1) we need to error. 
+ if requiredVP := ((totalVP * 2) / 3) + 1; sumVP < requiredVP { + return fmt.Errorf( + "insufficient cumulative voting power received to verify vote extensions; got: %d, expected: >=%d", + sumVP, requiredVP, + ) + } + return nil +} + +// validateExtendedCommitAgainstLastCommit validates an ExtendedCommitInfo against a LastCommit. Specifically, +// it checks that the ExtendedCommit + LastCommit (for the same height), are consistent with each other + that +// they are ordered correctly (by voting power) in accordance with +// [comet](https://github.com/cometbft/cometbft/blob/4ce0277b35f31985bbf2c25d3806a184a4510010/types/validator_set.go#L784). +func validateExtendedCommitAgainstLastCommit(ec abci.ExtendedCommitInfo, lc comet.CommitInfo) error { + // check that the rounds are the same + if ec.Round != lc.Round() { + return fmt.Errorf("extended commit round %d does not match last commit round %d", ec.Round, lc.Round()) + } + + // check that the # of votes are the same + if len(ec.Votes) != lc.Votes().Len() { + return fmt.Errorf("extended commit votes length %d does not match last commit votes length %d", len(ec.Votes), lc.Votes().Len()) + } + + // check sort order of extended commit votes + if !slices.IsSortedFunc(ec.Votes, func(vote1, vote2 abci.ExtendedVoteInfo) int { + if vote1.Validator.Power == vote2.Validator.Power { + return bytes.Compare(vote1.Validator.Address, vote2.Validator.Address) // addresses sorted in ascending order (used to break vp conflicts) + } + return -int(vote1.Validator.Power - vote2.Validator.Power) // vp sorted in descending order + }) { + return fmt.Errorf("extended commit votes are not sorted by voting power") + } + + addressCache := make(map[string]struct{}, len(ec.Votes)) + // check that consistency between LastCommit and ExtendedCommit + for i, vote := range ec.Votes { + // cache addresses to check for duplicates + if _, ok := addressCache[string(vote.Validator.Address)]; ok { + return fmt.Errorf("extended commit vote address %X is 
duplicated", vote.Validator.Address) + } + addressCache[string(vote.Validator.Address)] = struct{}{} + + if !bytes.Equal(vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) { + return fmt.Errorf("extended commit vote address %X does not match last commit vote address %X", vote.Validator.Address, lc.Votes().Get(i).Validator().Address()) + } + if vote.Validator.Power != lc.Votes().Get(i).Validator().Power() { + return fmt.Errorf("extended commit vote power %d does not match last commit vote power %d", vote.Validator.Power, lc.Votes().Get(i).Validator().Power()) + } + } + + return nil +} + +type ( + // ProposalTxVerifier defines the interface that is implemented by BaseApp, + // that any custom ABCI PrepareProposal and ProcessProposal handler can use + // to verify a transaction. + ProposalTxVerifier interface { + PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) + ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) + TxDecode(txBz []byte) (sdk.Tx, error) + TxEncode(tx sdk.Tx) ([]byte, error) + } + + // DefaultProposalHandler defines the default ABCI PrepareProposal and + // ProcessProposal handlers. + DefaultProposalHandler struct { + mempool mempool.Mempool + txVerifier ProposalTxVerifier + txSelector TxSelector + signerExtAdapter mempool.SignerExtractionAdapter + } +) + +func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler { + return &DefaultProposalHandler{ + mempool: mp, + txVerifier: txVerifier, + txSelector: NewDefaultTxSelector(), + signerExtAdapter: mempool.NewDefaultSignerExtractionAdapter(), + } +} + +// SetTxSelector sets the TxSelector function on the DefaultProposalHandler. +func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) { + h.txSelector = ts +} + +// SetSignerExtractionAdapter sets the SetSignerExtractionAdapter on the DefaultProposalHandler. 
func (h *DefaultProposalHandler) SetSignerExtractionAdapter(signerExtAdapter mempool.SignerExtractionAdapter) {
	h.signerExtAdapter = signerExtAdapter
}

// PrepareProposalHandler returns the default implementation for processing an
// ABCI proposal. The application's mempool is enumerated and all valid
// transactions are added to the proposal. Transactions are valid if they:
//
// 1) Successfully encode to bytes.
// 2) Are valid (i.e. pass runTx, AnteHandler only).
//
// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is
// reached or the mempool is exhausted.
//
// Note:
//
// - Step (2) is identical to the validation step performed in
// DefaultProcessProposal. It is very important that the same validation logic
// is used in both steps, and applications must ensure that this is the case in
// non-default handlers.
//
// - If no mempool is set or if the mempool is a no-op mempool, the transactions
// requested from CometBFT will simply be returned, which, by default, are in
// FIFO order.
func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler {
	return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
		var maxBlockGas uint64
		if b := ctx.ConsensusParams().Block; b != nil {
			maxBlockGas = uint64(b.MaxGas)
		}

		// reset selector state for the next proposal, regardless of outcome
		defer h.txSelector.Clear()

		// If the mempool is nil or NoOp we simply return the transactions
		// requested from CometBFT, which, by default, should be in FIFO order.
		//
		// Note, we still need to ensure the transactions returned respect req.MaxTxBytes.
		_, isNoOp := h.mempool.(mempool.NoOpMempool)
		if h.mempool == nil || isNoOp {
			for _, txBz := range req.Txs {
				tx, err := h.txVerifier.TxDecode(txBz)
				if err != nil {
					return nil, err
				}

				stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz)
				if stop {
					break
				}
			}

			return &abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs(ctx)}, nil
		}

		// sequence bookkeeping per signer so only contiguous nonces are proposed
		selectedTxsSignersSeqs := make(map[string]uint64)
		var (
			resError        error
			selectedTxsNums int
			invalidTxs      []sdk.Tx // invalid txs to be removed out of the loop to avoid dead lock
		)
		mempool.SelectBy(ctx, h.mempool, req.Txs, func(memTx sdk.Tx) bool {
			unorderedTx, ok := memTx.(sdk.TxWithUnordered)
			isUnordered := ok && unorderedTx.GetUnordered()
			txSignersSeqs := make(map[string]uint64)

			// if the tx is unordered, we don't need to check the sequence, we just add it
			if !isUnordered {
				signerData, err := h.signerExtAdapter.GetSigners(memTx)
				if err != nil {
					// propagate the error to the caller
					resError = err
					return false
				}

				// If the signers aren't in selectedTxsSignersSeqs then we haven't seen them before
				// so we add them and continue given that we don't need to check the sequence.
				shouldAdd := true
				for _, signer := range signerData {
					seq, ok := selectedTxsSignersSeqs[signer.Signer.String()]
					if !ok {
						txSignersSeqs[signer.Signer.String()] = signer.Sequence
						continue
					}

					// If we have seen this signer before in this block, we must make
					// sure that the current sequence is seq+1; otherwise is invalid
					// and we skip it.
					if seq+1 != signer.Sequence {
						shouldAdd = false
						break
					}
					txSignersSeqs[signer.Signer.String()] = signer.Sequence
				}
				if !shouldAdd {
					return true
				}
			}

			// NOTE: Since transaction verification was already executed in CheckTx,
			// which calls mempool.Insert, in theory everything in the pool should be
			// valid. But some mempool implementations may insert invalid txs, so we
			// check again.
			txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx)
			if err != nil {
				invalidTxs = append(invalidTxs, memTx)
			} else {
				stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz)
				if stop {
					return false
				}

				txsLen := len(h.txSelector.SelectedTxs(ctx))
				// If the tx is unordered, we don't need to update the sender sequence.
				if !isUnordered {
					for sender, seq := range txSignersSeqs {
						// If txsLen != selectedTxsNums is true, it means that we've
						// added a new tx to the selected txs, so we need to update
						// the sequence of the sender.
						if txsLen != selectedTxsNums {
							selectedTxsSignersSeqs[sender] = seq
						} else if _, ok := selectedTxsSignersSeqs[sender]; !ok {
							// The transaction hasn't been added but it passed the
							// verification, so we know that the sequence is correct.
							// So we set this sender's sequence to seq-1, in order
							// to avoid unnecessary calls to PrepareProposalVerifyTx.
							selectedTxsSignersSeqs[sender] = seq - 1
						}
					}
				}
				selectedTxsNums = txsLen
			}

			return true
		})

		if resError != nil {
			return nil, resError
		}

		for _, tx := range invalidTxs {
			err := h.mempool.Remove(tx)
			if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
				return nil, err
			}
		}

		return &abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs(ctx)}, nil
	}
}

// ProcessProposalHandler returns the default implementation for processing an
// ABCI proposal. Every transaction in the proposal must pass 2 conditions:
//
// 1. The transaction bytes must decode to a valid transaction.
// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only)
//
// If any transaction fails to pass either condition, the proposal is rejected.
// Note that step (2) is identical to the validation step performed in
// DefaultPrepareProposal. It is very important that the same validation logic
// is used in both steps, and applications must ensure that this is the case in
// non-default handlers.
func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler {
	// If the mempool is nil or NoOp we simply return ACCEPT,
	// because PrepareProposal may have included txs that could fail verification.
	_, isNoOp := h.mempool.(mempool.NoOpMempool)
	if h.mempool == nil || isNoOp {
		return NoOpProcessProposal()
	}

	return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) {
		var totalTxGas uint64

		var maxBlockGas int64
		if b := ctx.ConsensusParams().Block; b != nil {
			maxBlockGas = b.MaxGas
		}

		for _, txBytes := range req.Txs {
			tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes)
			if err != nil {
				return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil
			}

			if maxBlockGas > 0 {
				gasTx, ok := tx.(GasTx)
				if ok {
					totalTxGas += gasTx.GetGas()
				}

				// reject the whole proposal if its cumulative gas exceeds the block limit
				if totalTxGas > uint64(maxBlockGas) {
					return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil
				}
			}
		}

		return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil
	}
}

// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always
// return the transactions sent by the client's request.
func NoOpPrepareProposal() sdk.PrepareProposalHandler {
	return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
		return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil
	}
}

// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always
// return ACCEPT.
+func NoOpProcessProposal() sdk.ProcessProposalHandler { + return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil + } +} + +// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an +// empty byte slice as the vote extension. +func NoOpExtendVote() sdk.ExtendVoteHandler { + return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + return &abci.ResponseExtendVote{VoteExtension: []byte{}}, nil + } +} + +// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It +// will always return an ACCEPT status with no error. +func NoOpVerifyVoteExtensionHandler() sdk.VerifyVoteExtensionHandler { + return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { + return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil + } +} + +// TxSelector defines a helper type that assists in selecting transactions during +// mempool transaction selection in PrepareProposal. It keeps track of the total +// number of bytes and total gas of the selected transactions. It also keeps +// track of the selected transactions themselves. +type TxSelector interface { + // SelectedTxs should return a copy of the selected transactions. + SelectedTxs(ctx context.Context) [][]byte + + // Clear should clear the TxSelector, nulling out all relevant fields. + Clear() + + // SelectTxForProposal should attempt to select a transaction for inclusion in + // a proposal based on inclusion criteria defined by the TxSelector. It must + // return if the caller should halt the transaction selection loop + // (typically over a mempool) or otherwise. 
	SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool
}

// defaultTxSelector is the stock TxSelector: it greedily accumulates txs while
// tracking the running byte and gas totals of everything selected so far.
type defaultTxSelector struct {
	totalTxBytes uint64   // sum of proto-encoded sizes of selectedTxs
	totalTxGas   uint64   // sum of gas limits of selectedTxs
	selectedTxs  [][]byte // raw tx bytes chosen for the proposal, in selection order
}

// NewDefaultTxSelector returns a zero-valued defaultTxSelector ready for use.
func NewDefaultTxSelector() TxSelector {
	return &defaultTxSelector{}
}

// SelectedTxs returns a defensive copy of the selected tx bytes so callers
// cannot mutate the selector's internal slice.
func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte {
	txs := make([][]byte, len(ts.selectedTxs))
	copy(txs, ts.selectedTxs)
	return txs
}

// Clear resets all accumulated state, making the selector reusable for the
// next PrepareProposal round.
func (ts *defaultTxSelector) Clear() {
	ts.totalTxBytes = 0
	ts.totalTxGas = 0
	ts.selectedTxs = nil
}

// SelectTxForProposal adds memTx/txBz to the proposal if it fits within the
// remaining byte and (when maxBlockGas > 0) gas budgets, and reports whether
// the caller should stop selecting (capacity reached).
func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool {
	// Size is the CometBFT proto-framed size, not len(txBz), to match how the
	// block itself accounts for tx bytes.
	txSize := uint64(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{txBz}))

	// Txs that do not implement GasTx are treated as consuming zero gas.
	var txGasLimit uint64
	if memTx != nil {
		if gasTx, ok := memTx.(GasTx); ok {
			txGasLimit = gasTx.GetGas()
		}
	}

	// only add the transaction to the proposal if we have enough capacity
	if (txSize + ts.totalTxBytes) <= maxTxBytes {
		// If there is a max block gas limit, add the tx only if the limit has
		// not been met.
		if maxBlockGas > 0 {
			// NOTE(review): txGasLimit + ts.totalTxGas could wrap around uint64
			// for adversarial gas limits — confirm upstream bounds the inputs.
			if (txGasLimit + ts.totalTxGas) <= maxBlockGas {
				ts.totalTxGas += txGasLimit
				ts.totalTxBytes += txSize
				ts.selectedTxs = append(ts.selectedTxs, txBz)
			}
		} else {
			// No block gas limit: byte capacity was already checked above.
			ts.totalTxBytes += txSize
			ts.selectedTxs = append(ts.selectedTxs, txBz)
		}
	}

	// check if we've reached capacity; if so, we cannot select any more transactions
	return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas))
}
diff --git a/baseapp/abci_utils_test.go b/baseapp/abci_utils_test.go
new file mode 100644
index 0000000..ca3021e
--- /dev/null
+++ b/baseapp/abci_utils_test.go
@@ -0,0 +1,787 @@
package baseapp_test

import (
	"bytes"
	"sort"
	"testing"

	abci "github.com/cometbft/cometbft/abci/types"
	cmtsecp256k1 "github.com/cometbft/cometbft/crypto/secp256k1"
	cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
	cmttypes "github.com/cometbft/cometbft/types"
	dbm "github.com/cosmos/cosmos-db"
	protoio "github.com/cosmos/gogoproto/io"
	"github.com/cosmos/gogoproto/proto"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
	"go.uber.org/mock/gomock"

	"cosmossdk.io/core/comet"
	"cosmossdk.io/core/header"
	"cosmossdk.io/log"

	"github.com/cosmos/cosmos-sdk/baseapp"
	baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil"
	"github.com/cosmos/cosmos-sdk/baseapp/testutil/mock"
	"github.com/cosmos/cosmos-sdk/client"
	codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil"
	"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
	"github.com/cosmos/cosmos-sdk/testutil/testdata"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/mempool"
	signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing"
	authtx "github.com/cosmos/cosmos-sdk/x/auth/tx"
)

const (
	chainID = "chain-id"
)

// testValidator bundles a validator's consensus address, its CometBFT proto
// public key, and the private key used to sign vote extensions in tests.
type testValidator struct {
	consAddr sdk.ConsAddress
	tmPk
cmtprotocrypto.PublicKey + privKey cmtsecp256k1.PrivKey +} + +func newTestValidator() testValidator { + privkey := cmtsecp256k1.GenPrivKey() + pubkey := privkey.PubKey() + tmPk := cmtprotocrypto.PublicKey{ + Sum: &cmtprotocrypto.PublicKey_Secp256K1{ + Secp256K1: pubkey.Bytes(), + }, + } + + return testValidator{ + consAddr: sdk.ConsAddress(pubkey.Address()), + tmPk: tmPk, + privKey: privkey, + } +} + +func (t testValidator) toValidator(power int64) abci.Validator { + return abci.Validator{ + Address: t.consAddr.Bytes(), + Power: power, + } +} + +type ABCIUtilsTestSuite struct { + suite.Suite + + valStore *mock.MockValidatorStore + vals [3]testValidator + ctx sdk.Context +} + +func NewABCIUtilsTestSuite(t *testing.T) *ABCIUtilsTestSuite { + t.Helper() + // create 3 validators + s := &ABCIUtilsTestSuite{ + vals: [3]testValidator{ + newTestValidator(), + newTestValidator(), + newTestValidator(), + }, + } + + // create mock + ctrl := gomock.NewController(t) + valStore := mock.NewMockValidatorStore(ctrl) + s.valStore = valStore + + // set up mock + s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[0].consAddr.Bytes()).Return(s.vals[0].tmPk, nil).AnyTimes() + s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[1].consAddr.Bytes()).Return(s.vals[1].tmPk, nil).AnyTimes() + s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[2].consAddr.Bytes()).Return(s.vals[2].tmPk, nil).AnyTimes() + + // create context + s.ctx = sdk.Context{}.WithConsensusParams(cmtproto.ConsensusParams{ + Abci: &cmtproto.ABCIParams{ + VoteExtensionsEnableHeight: 2, + }, + }).WithBlockHeader(cmtproto.Header{ + ChainID: chainID, + }).WithLogger(log.NewTestLogger(t)) + return s +} + +func TestABCIUtilsTestSuite(t *testing.T) { + suite.Run(t, NewABCIUtilsTestSuite(t)) +} + +// check ValidateVoteExtensions works when all nodes have CommitBlockID votes +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsHappyPath() { + ext := []byte("vote-extension") + cve := 
cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + extSig1, err := s.vals[1].privKey.Sign(bz) + s.Require().NoError(err) + + extSig2, err := s.vals[2].privKey.Sign(bz) + s.Require().NoError(err) + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // enable vote-extensions + + llc := abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + { + Validator: s.vals[0].toValidator(333), + VoteExtension: ext, + ExtensionSignature: extSig0, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + { + Validator: s.vals[1].toValidator(333), + VoteExtension: ext, + ExtensionSignature: extSig1, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + ExtensionSignature: extSig2, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + }, + } + + // order + convert to last commit + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // expect-pass (votes of height 2 are included in next block) + s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +// check ValidateVoteExtensions works when a single node has submitted a BlockID_Absent +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsSingleVoteAbsent() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + extSig2, err := s.vals[2].privKey.Sign(bz) + s.Require().NoError(err) + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + + llc := 
abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + { + Validator: s.vals[0].toValidator(333), + VoteExtension: ext, + ExtensionSignature: extSig0, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + // validator of power <1/3 is missing, so commit-info shld still be valid + { + Validator: s.vals[1].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagAbsent, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + ExtensionSignature: extSig2, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + }, + } + + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // expect-pass (votes of height 2 are included in next block) + s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +// check ValidateVoteExtensions works with duplicate votes +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsDuplicateVotes() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + ve := abci.ExtendedVoteInfo{ + Validator: s.vals[0].toValidator(333), + VoteExtension: ext, + ExtensionSignature: extSig0, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + } + + ve2 := abci.ExtendedVoteInfo{ + Validator: s.vals[0].toValidator(334), // use diff voting-power to dupe + VoteExtension: ext, + ExtensionSignature: extSig0, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + } + + llc := abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + ve, + ve2, + }, + } + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // expect fail (duplicate votes) + s.Require().Error(baseapp.ValidateVoteExtensions(s.ctx, 
s.valStore, 0, "", llc)) +} + +// check ValidateVoteExtensions works when a single node has submitted a BlockID_Nil +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsSingleVoteNil() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + extSig2, err := s.vals[2].privKey.Sign(bz) + s.Require().NoError(err) + + llc := abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + { + Validator: s.vals[0].toValidator(333), + VoteExtension: ext, + ExtensionSignature: extSig0, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + // validator of power <1/3 is missing, so commit-info should still be valid + { + Validator: s.vals[1].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagNil, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + ExtensionSignature: extSig2, + BlockIdFlag: cmtproto.BlockIDFlagCommit, + }, + }, + } + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + + // create last commit + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // expect-pass (votes of height 2 are included in next block) + s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +// check ValidateVoteExtensions works when two nodes have submitted a BlockID_Nil / BlockID_Absent +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsTwoVotesNilAbsent() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + llc := 
abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + // validator of power >2/3 is missing, so commit-info should not be valid + { + Validator: s.vals[0].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagCommit, + VoteExtension: ext, + ExtensionSignature: extSig0, + }, + { + Validator: s.vals[1].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagNil, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + BlockIdFlag: cmtproto.BlockIDFlagAbsent, + }, + }, + } + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + + // create last commit + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // expect-pass (votes of height 2 are included in next block) + s.Require().Error(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsIncorrectVotingPower() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + llc := abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + // validator of power >2/3 is missing, so commit-info should not be valid + { + Validator: s.vals[0].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagCommit, + VoteExtension: ext, + ExtensionSignature: extSig0, + }, + { + Validator: s.vals[1].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagNil, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + BlockIdFlag: cmtproto.BlockIDFlagAbsent, + }, + }, + } + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + + // create last commit + llc, info := extendedCommitToLastCommit(llc) + s.ctx = 
s.ctx.WithCometInfo(info) + + // modify voting powers to differ from the last-commit + llc.Votes[0].Validator.Power = 335 + llc.Votes[2].Validator.Power = 332 + + // expect-pass (votes of height 2 are included in next block) + s.Require().Error(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsIncorrectOrder() { + ext := []byte("vote-extension") + cve := cmtproto.CanonicalVoteExtension{ + Extension: ext, + Height: 2, + Round: int64(0), + ChainId: chainID, + } + + bz, err := marshalDelimitedFn(&cve) + s.Require().NoError(err) + + extSig0, err := s.vals[0].privKey.Sign(bz) + s.Require().NoError(err) + + llc := abci.ExtendedCommitInfo{ + Round: 0, + Votes: []abci.ExtendedVoteInfo{ + // validator of power >2/3 is missing, so commit-info should not be valid + { + Validator: s.vals[0].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagCommit, + VoteExtension: ext, + ExtensionSignature: extSig0, + }, + { + Validator: s.vals[1].toValidator(333), + BlockIdFlag: cmtproto.BlockIDFlagNil, + }, + { + Validator: s.vals[2].toValidator(334), + VoteExtension: ext, + BlockIdFlag: cmtproto.BlockIDFlagAbsent, + }, + }, + } + + s.ctx = s.ctx.WithBlockHeight(3).WithHeaderInfo(header.Info{Height: 3, ChainID: chainID}) // vote-extensions are enabled + + // create last commit + llc, info := extendedCommitToLastCommit(llc) + s.ctx = s.ctx.WithCometInfo(info) + + // modify voting powers to differ from the last-commit + llc.Votes[0], llc.Votes[2] = llc.Votes[2], llc.Votes[0] + + // expect-pass (votes of height 2 are included in next block) + s.Require().Error(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 0, "", llc)) +} + +func (s *ABCIUtilsTestSuite) TestDefaultProposalHandler_NoOpMempoolTxSelection() { + // create a codec for marshaling + cdc := codectestutil.CodecOptions{}.NewCodec() + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // create a baseapp along with a tx config for tx generation + 
txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + app := baseapp.NewBaseApp(s.T().Name(), log.NewNopLogger(), dbm.NewMemDB(), txConfig.TxDecoder()) + + // create a proposal handler + ph := baseapp.NewDefaultProposalHandler(mempool.NoOpMempool{}, app) + handler := ph.PrepareProposalHandler() + + // build a tx + _, _, addr := testdata.KeyTestPubAddr() + builder := txConfig.NewTxBuilder() + s.Require().NoError(builder.SetMsgs( + &baseapptestutil.MsgCounter{Counter: 0, FailOnHandler: false, Signer: addr.String()}, + )) + builder.SetGasLimit(100) + setTxSignature(s.T(), builder, 0) + + // encode the tx to be used in the proposal request + tx := builder.GetTx() + txBz, err := txConfig.TxEncoder()(tx) + s.Require().NoError(err) + s.Require().Len(txBz, 152) + + txDataSize := int(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{txBz})) + s.Require().Equal(txDataSize, 155) + + testCases := map[string]struct { + ctx sdk.Context + req *abci.RequestPrepareProposal + expectedTxs int + }{ + "small max tx bytes": { + ctx: s.ctx, + req: &abci.RequestPrepareProposal{ + Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, + MaxTxBytes: 10, + }, + expectedTxs: 0, + }, + "small max gas": { + ctx: s.ctx.WithConsensusParams(cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 10, + }, + }), + req: &abci.RequestPrepareProposal{ + Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, + MaxTxBytes: 465, + }, + expectedTxs: 0, + }, + "large max tx bytes": { + ctx: s.ctx, + req: &abci.RequestPrepareProposal{ + Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, + MaxTxBytes: 465, + }, + expectedTxs: 3, + }, + "large max tx bytes len calculation": { + ctx: s.ctx, + req: &abci.RequestPrepareProposal{ + Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, + MaxTxBytes: 456, + }, + expectedTxs: 2, + }, + "max gas and tx bytes": { + ctx: s.ctx.WithConsensusParams(cmtproto.ConsensusParams{ + Block: &cmtproto.BlockParams{ + MaxGas: 200, + }, + }), + req: &abci.RequestPrepareProposal{ + Txs: [][]byte{txBz, 
txBz, txBz, txBz, txBz}, + MaxTxBytes: 465, + }, + expectedTxs: 2, + }, + } + + for name, tc := range testCases { + s.Run(name, func() { + // iterate multiple times to ensure the tx selector is cleared each time + for range 6 { + resp, err := handler(tc.ctx, tc.req) + s.Require().NoError(err) + s.Require().Len(resp.Txs, tc.expectedTxs) + } + }) + } +} + +func (s *ABCIUtilsTestSuite) TestDefaultProposalHandler_PriorityNonceMempoolTxSelection() { + cdc := codectestutil.CodecOptions{}.NewCodec() + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + + var ( + secret1 = []byte("secret1") + secret2 = []byte("secret2") + secret3 = []byte("secret3") + secret4 = []byte("secret4") + secret5 = []byte("secret5") + secret6 = []byte("secret6") + ) + + type testTx struct { + tx sdk.Tx + priority int64 + bz []byte + size int + } + + testTxs := []testTx{ + // test 1 + {tx: buildMsg(s.T(), txConfig, []byte(`0`), [][]byte{secret1}, []uint64{1}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`12345678910`), [][]byte{secret1}, []uint64{2}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`22`), [][]byte{secret1}, []uint64{3}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`32`), [][]byte{secret2}, []uint64{1}), priority: 8}, + // test 2 + {tx: buildMsg(s.T(), txConfig, []byte(`4`), [][]byte{secret1, secret2}, []uint64{3, 3}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`52345678910`), [][]byte{secret1, secret3}, []uint64{4, 3}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`62`), [][]byte{secret1, secret4}, []uint64{5, 3}), priority: 8}, + {tx: buildMsg(s.T(), txConfig, []byte(`72`), [][]byte{secret3, secret5}, []uint64{4, 3}), priority: 8}, + {tx: buildMsg(s.T(), txConfig, []byte(`82`), [][]byte{secret2, secret6}, []uint64{4, 3}), priority: 8}, + // test 3 + {tx: buildMsg(s.T(), txConfig, []byte(`9`), [][]byte{secret3, secret4}, []uint64{3, 3}), priority: 10}, + {tx: 
buildMsg(s.T(), txConfig, []byte(`1052345678910`), [][]byte{secret1, secret2}, []uint64{4, 4}), priority: 8}, + {tx: buildMsg(s.T(), txConfig, []byte(`11`), [][]byte{secret1, secret2}, []uint64{5, 5}), priority: 8}, + // test 4 + {tx: buildMsg(s.T(), txConfig, []byte(`1252345678910`), [][]byte{secret1}, []uint64{3}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`13`), [][]byte{secret1}, []uint64{5}), priority: 10}, + {tx: buildMsg(s.T(), txConfig, []byte(`14`), [][]byte{secret1}, []uint64{6}), priority: 8}, + } + + for i := range testTxs { + bz, err := txConfig.TxEncoder()(testTxs[i].tx) + s.Require().NoError(err) + testTxs[i].bz = bz + testTxs[i].size = int(cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{bz})) + } + + s.Require().Equal(testTxs[0].size, 111) + s.Require().Equal(testTxs[1].size, 121) + s.Require().Equal(testTxs[2].size, 112) + s.Require().Equal(testTxs[3].size, 112) + s.Require().Equal(testTxs[4].size, 195) + s.Require().Equal(testTxs[5].size, 205) + s.Require().Equal(testTxs[6].size, 196) + s.Require().Equal(testTxs[7].size, 196) + s.Require().Equal(testTxs[8].size, 196) + + testCases := map[string]struct { + ctx sdk.Context + txInputs []testTx + req *abci.RequestPrepareProposal + handler sdk.PrepareProposalHandler + expectedTxs []int + }{ + "skip same-sender non-sequential sequence and then add others txs": { + ctx: s.ctx, + txInputs: []testTx{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, + req: &abci.RequestPrepareProposal{ + MaxTxBytes: 111 + 112, + }, + expectedTxs: []int{0, 3}, + }, + "skip multi-signers msg non-sequential sequence": { + ctx: s.ctx, + txInputs: []testTx{testTxs[4], testTxs[5], testTxs[6], testTxs[7], testTxs[8]}, + req: &abci.RequestPrepareProposal{ + MaxTxBytes: 195 + 196, + }, + expectedTxs: []int{4, 8}, + }, + "only the first tx is added": { + // Because tx 10 is valid, tx 11 can't be valid as they have higher sequence numbers. 
+ ctx: s.ctx, + txInputs: []testTx{testTxs[9], testTxs[10], testTxs[11]}, + req: &abci.RequestPrepareProposal{ + MaxTxBytes: 195 + 196, + }, + expectedTxs: []int{9}, + }, + "no txs added": { + // Becasuse the first tx was deemed valid but too big, the next expected valid sequence is tx[0].seq (3), so + // the rest of the txs fail because they have a seq of 4. + ctx: s.ctx, + txInputs: []testTx{testTxs[12], testTxs[13], testTxs[14]}, + req: &abci.RequestPrepareProposal{ + MaxTxBytes: 112, + }, + expectedTxs: []int{}, + }, + } + + for name, tc := range testCases { + s.Run(name, func() { + ctrl := gomock.NewController(s.T()) + app := mock.NewMockProposalTxVerifier(ctrl) + mp := mempool.NewPriorityMempool( + mempool.PriorityNonceMempoolConfig[int64]{ + TxPriority: mempool.NewDefaultTxPriority(), + MaxTx: 0, + SignerExtractor: mempool.NewDefaultSignerExtractionAdapter(), + }, + ) + + ph := baseapp.NewDefaultProposalHandler(mp, app) + + for _, v := range tc.txInputs { + app.EXPECT().PrepareProposalVerifyTx(v.tx).Return(v.bz, nil).AnyTimes() + s.NoError(mp.Insert(s.ctx.WithPriority(v.priority), v.tx)) + tc.req.Txs = append(tc.req.Txs, v.bz) + } + + resp, err := ph.PrepareProposalHandler()(tc.ctx, tc.req) + s.Require().NoError(err) + respTxIndexes := []int{} + for _, tx := range resp.Txs { + for i, v := range testTxs { + if bytes.Equal(tx, v.bz) { + respTxIndexes = append(respTxIndexes, i) + } + } + } + + s.Require().EqualValues(tc.expectedTxs, respTxIndexes) + }) + } +} + +func marshalDelimitedFn(msg proto.Message) ([]byte, error) { + var buf bytes.Buffer + if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func buildMsg(t *testing.T, txConfig client.TxConfig, value []byte, secrets [][]byte, nonces []uint64) sdk.Tx { + t.Helper() + builder := txConfig.NewTxBuilder() + _ = builder.SetMsgs( + &baseapptestutil.MsgKeyValue{Value: value}, + ) + require.Equal(t, len(secrets), len(nonces)) + signatures 
:= make([]signingtypes.SignatureV2, 0)
	for index, secret := range secrets {
		nonce := nonces[index]
		// Deterministic key per secret: the same secret always yields the same signer.
		privKey := secp256k1.GenPrivKeyFromSecret(secret)
		pubKey := privKey.PubKey()
		signatures = append(signatures, signingtypes.SignatureV2{
			PubKey:   pubKey,
			Sequence: nonce,
			Data:     &signingtypes.SingleSignatureData{},
		})
	}
	setTxSignatureWithSecret(t, builder, signatures...)
	return builder.GetTx()
}

// setTxSignatureWithSecret applies the given signatures to the builder,
// failing the test immediately on error.
func setTxSignatureWithSecret(t *testing.T, builder client.TxBuilder, signatures ...signingtypes.SignatureV2) {
	t.Helper()
	err := builder.SetSignatures(
		signatures...,
	)
	require.NoError(t, err)
}

// extendedCommitToLastCommit sorts the extended commit's votes in place
// (descending power, ties broken by ascending address) and derives the
// matching abci.CommitInfo wrapped in a comet.BlockInfo for the context.
func extendedCommitToLastCommit(ec abci.ExtendedCommitInfo) (abci.ExtendedCommitInfo, comet.BlockInfo) {
	// sort the extended commit info
	sort.Sort(extendedVoteInfos(ec.Votes))

	// convert the extended commit info to last commit info
	lastCommit := abci.CommitInfo{
		Round: ec.Round,
		Votes: make([]abci.VoteInfo, len(ec.Votes)),
	}

	// Only validator identity and power are carried over; vote extensions and
	// signatures are not part of CommitInfo.
	for i, vote := range ec.Votes {
		lastCommit.Votes[i] = abci.VoteInfo{
			Validator: abci.Validator{
				Address: vote.Validator.Address,
				Power:   vote.Validator.Power,
			},
		}
	}

	return ec, baseapp.NewBlockInfo(
		nil,
		nil,
		nil,
		lastCommit,
	)
}

// extendedVoteInfos implements sort.Interface over vote infos: descending
// voting power, with ties ordered by ascending validator address bytes.
type extendedVoteInfos []abci.ExtendedVoteInfo

func (v extendedVoteInfos) Len() int {
	return len(v)
}

func (v extendedVoteInfos) Less(i, j int) bool {
	if v[i].Validator.Power == v[j].Validator.Power {
		return bytes.Compare(v[i].Validator.Address, v[j].Validator.Address) == -1
	}
	return v[i].Validator.Power > v[j].Validator.Power
}

func (v extendedVoteInfos) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}
diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go
new file mode 100644
index 0000000..4b0c51f
--- /dev/null
+++ b/baseapp/baseapp.go
@@ -0,0 +1,1211 @@
package baseapp

import (
	"context"
	"fmt"
	"maps"
	"math"
	"slices"
	"strconv"
	"sync"

	"github.com/cockroachdb/errors"
	abci
"github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/crypto/tmhash" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + dbm "github.com/cosmos/cosmos-db" + "github.com/cosmos/gogoproto/proto" + protov2 "google.golang.org/protobuf/proto" + + "cosmossdk.io/core/header" + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/log" + "cosmossdk.io/store" + storemetrics "cosmossdk.io/store/metrics" + "cosmossdk.io/store/snapshots" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp/oe" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/mempool" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +type ( + execMode uint8 + + // StoreLoader defines a customizable function to control how we load the + // CommitMultiStore from disk. This is useful for state migration, when + // loading a datastore written with an older version of the software. In + // particular, if a module changed the substore key name (or removed a substore) + // between two versions of the software. + StoreLoader func(ms storetypes.CommitMultiStore) error +) + +const ( + execModeCheck execMode = iota // Check a transaction + execModeReCheck // Recheck a (pending) transaction after a commit + execModeSimulate // Simulate a transaction + execModePrepareProposal // Prepare a block proposal + execModeProcessProposal // Process a block proposal + execModeVoteExtension // Extend or verify a pre-commit vote + execModeVerifyVoteExtension // Verify a vote extension + execModeFinalize // Finalize a block proposal +) + +var _ servertypes.ABCI = (*BaseApp)(nil) + +// BaseApp reflects the ABCI application implementation. 
type BaseApp struct {
	// initialized on creation
	mu                sync.Mutex                  // mu protects the fields below.
	logger            log.Logger                  // structured logger, namespaced with module key "baseapp"
	name              string                      // application name from abci.BlockInfo
	db                dbm.DB                      // common DB backend
	cms               storetypes.CommitMultiStore // Main (uncached) state
	qms               storetypes.MultiStore       // Optional alternative multistore for querying only.
	storeLoader       StoreLoader                 // function to handle store loading, may be overridden with SetStoreLoader()
	grpcQueryRouter   *GRPCQueryRouter            // router for redirecting gRPC query calls
	msgServiceRouter  *MsgServiceRouter           // router for redirecting Msg service messages
	interfaceRegistry codectypes.InterfaceRegistry
	txDecoder         sdk.TxDecoder // unmarshal []byte into sdk.Tx
	txEncoder         sdk.TxEncoder // marshal sdk.Tx into []byte

	mempool     mempool.Mempool // application side mempool
	anteHandler sdk.AnteHandler // ante handler for fee and auth
	postHandler sdk.PostHandler // post handler, optional

	checkTxHandler     sdk.CheckTxHandler             // ABCI CheckTx handler
	initChainer        sdk.InitChainer                // ABCI InitChain handler
	preBlocker         sdk.PreBlocker                 // logic to run before BeginBlocker
	beginBlocker       sdk.BeginBlocker               // (legacy ABCI) BeginBlock handler
	endBlocker         sdk.EndBlocker                 // (legacy ABCI) EndBlock handler
	processProposal    sdk.ProcessProposalHandler     // ABCI ProcessProposal handler
	prepareProposal    sdk.PrepareProposalHandler     // ABCI PrepareProposal handler
	extendVote         sdk.ExtendVoteHandler          // ABCI ExtendVote handler
	verifyVoteExt      sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler
	prepareCheckStater sdk.PrepareCheckStater         // logic to run during commit using the checkState
	precommiter        sdk.Precommiter                // logic to run during commit using the deliverState

	addrPeerFilter sdk.PeerFilter // filter peers by address and port
	idPeerFilter   sdk.PeerFilter // filter peers by node ID
	fauxMerkleMode bool           // if true, IAVL MountStores uses MountStoresDB for simulation speed.
	sigverifyTx    bool           // in the simulation test, since the account does not have a private key, we have to ignore the tx sigverify.

	// manages snapshots, i.e. dumps of app state at certain intervals
	snapshotManager *snapshots.Manager

	// volatile states:
	//
	// - checkState is set on InitChain and reset on Commit
	// - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil
	// on Commit.
	//
	// - checkState: Used for CheckTx, which is set based on the previous block's
	// state. This state is never committed.
	//
	// - prepareProposalState: Used for PrepareProposal, which is set based on the
	// previous block's state. This state is never committed. In case of multiple
	// consensus rounds, the state is always reset to the previous block's state.
	//
	// - processProposalState: Used for ProcessProposal, which is set based on
	// the previous block's state. This state is never committed. In case of
	// multiple rounds, the state is always reset to the previous block's state.
	//
	// - finalizeBlockState: Used for FinalizeBlock, which is set based on the
	// previous block's state. This state is committed.
	checkState           *state
	prepareProposalState *state
	processProposalState *state
	finalizeBlockState   *state

	// An inter-block write-through cache provided to the context during the ABCI
	// FinalizeBlock call.
	interBlockCache storetypes.MultiStorePersistentCache

	// paramStore is used to query for ABCI consensus parameters from an
	// application parameter store.
	paramStore ParamStore

	// queryGasLimit defines the maximum gas for queries; unbounded if 0.
	queryGasLimit uint64

	// The minimum gas prices a validator is willing to accept for processing a
	// transaction. This is mainly used for DoS and spam prevention.
	minGasPrices sdk.DecCoins

	// initialHeight is the initial height at which we start the BaseApp
	initialHeight int64

	// flag for sealing options and parameters to a BaseApp
	sealed bool

	// block height at which to halt the chain and gracefully shutdown
	haltHeight uint64

	// minimum block time (in Unix seconds) at which to halt the chain and gracefully shutdown
	haltTime uint64

	// minRetainBlocks defines the minimum block height offset from the current
	// block being committed, such that all blocks past this offset are pruned
	// from CometBFT. It is used as part of the process of determining the
	// ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
	// that no blocks should be pruned.
	//
	// Note: CometBFT block pruning is dependent on this parameter in conjunction
	// with the unbonding (safety threshold) period, state pruning and state sync
	// snapshot parameters to determine the correct minimum value of
	// ResponseCommit.RetainHeight.
	minRetainBlocks uint64

	// application's version string
	version string

	// application's protocol version that increments on every upgrade
	// if BaseApp is passed to the upgrade keeper's NewKeeper method.
	appVersion uint64

	// recovery handler for app.runTx method
	runTxRecoveryMiddleware recoveryMiddleware

	// trace set will return full stack traces for errors in ABCI Log field
	trace bool

	// indexEvents defines the set of events in the form {eventType}.{attributeKey},
	// which informs CometBFT what to index. If empty, all events will be indexed.
	indexEvents map[string]struct{}

	// streamingManager for managing instances and configuration of ABCIListener services
	streamingManager storetypes.StreamingManager

	chainID string

	cdc codec.Codec

	// optimisticExec contains the context required for Optimistic Execution,
	// including the goroutine handling. This is experimental and must be enabled
	// by developers.
	optimisticExec *oe.OptimisticExecution

	// disableBlockGasMeter will disable the block gas meter if true, block gas meter is tricky to support
	// when executing transactions in parallel.
	// when disabled, the block gas meter in context is a noop one.
	//
	// SAFETY: it's safe to do if validators validate the total gas wanted in the `ProcessProposal`, which is the case in the default handler.
	disableBlockGasMeter bool
}

// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
// variadic number of option functions, which act on the BaseApp to set
// configuration choices.
//
// Options are applied before the defaulting below, so any handler an option
// installs (mempool, proposal handlers, vote-extension handlers) takes
// precedence over the built-in no-op/default implementations.
func NewBaseApp(
	name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp),
) *BaseApp {
	app := &BaseApp{
		logger:           logger.With(log.ModuleKey, "baseapp"),
		name:             name,
		db:               db,
		cms:              store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store
		storeLoader:      DefaultStoreLoader,
		grpcQueryRouter:  NewGRPCQueryRouter(),
		msgServiceRouter: NewMsgServiceRouter(),
		txDecoder:        txDecoder,
		fauxMerkleMode:   false,
		sigverifyTx:      true,
		queryGasLimit:    math.MaxUint64, // queries are unbounded unless overridden via an option
	}

	// Apply caller-supplied options before filling in defaults below.
	for _, option := range options {
		option(app)
	}

	if app.mempool == nil {
		app.SetMempool(mempool.NoOpMempool{})
	}

	abciProposalHandler := NewDefaultProposalHandler(app.mempool, app)

	// Install default ABCI handlers for anything the options did not set.
	if app.prepareProposal == nil {
		app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler())
	}
	if app.processProposal == nil {
		app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler())
	}
	if app.extendVote == nil {
		app.SetExtendVoteHandler(NoOpExtendVote())
	}
	if app.verifyVoteExt == nil {
		app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler())
	}
	if app.interBlockCache != nil {
		app.cms.SetInterBlockCache(app.interBlockCache)
	}

	app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware()

	// Initialize with an empty interface registry to avoid nil pointer dereference.
	// Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic.
	app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry())

	protoFiles, err := proto.MergedRegistry()
	if err != nil {
		logger.Warn("error creating merged proto registry", "error", err)
	} else {
		err = msgservice.ValidateProtoAnnotations(protoFiles)
		if err != nil {
			// Once we switch to using protoreflect-based antehandlers, we might
			// want to panic here instead of logging a warning.
			logger.Warn("error validating merged proto registry annotations", "error", err)
		}
	}

	return app
}

// Name returns the name of the BaseApp.
func (app *BaseApp) Name() string {
	return app.name
}

// AppVersion returns the application's protocol version.
func (app *BaseApp) AppVersion() uint64 {
	return app.appVersion
}

// Version returns the application's version string.
func (app *BaseApp) Version() string {
	return app.version
}

// Logger returns the logger of the BaseApp.
func (app *BaseApp) Logger() log.Logger {
	return app.logger
}

// Trace returns the boolean value for logging error stack traces.
func (app *BaseApp) Trace() bool {
	return app.trace
}

// MsgServiceRouter returns the MsgServiceRouter of a BaseApp.
func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter { return app.msgServiceRouter }

// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp.
func (app *BaseApp) GRPCQueryRouter() *GRPCQueryRouter { return app.grpcQueryRouter }

// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp
// multistore.
+func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) { + for _, key := range keys { + switch key.(type) { + case *storetypes.KVStoreKey: + if !app.fauxMerkleMode { + app.MountStore(key, storetypes.StoreTypeIAVL) + } else { + // StoreTypeDB doesn't do anything upon commit, and it doesn't + // retain history, but it's useful for faster simulation. + app.MountStore(key, storetypes.StoreTypeDB) + } + + case *storetypes.TransientStoreKey: + app.MountStore(key, storetypes.StoreTypeTransient) + + case *storetypes.MemoryStoreKey: + app.MountStore(key, storetypes.StoreTypeMemory) + + default: + panic(fmt.Sprintf("Unrecognized store key type :%T", key)) + } + } +} + +// MountKVStores mounts all IAVL or DB stores to the provided keys in the +// BaseApp multistore. +func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) { + for _, key := range keys { + if !app.fauxMerkleMode { + app.MountStore(key, storetypes.StoreTypeIAVL) + } else { + // StoreTypeDB doesn't do anything upon commit, and it doesn't + // retain history, but it's useful for faster simulation. + app.MountStore(key, storetypes.StoreTypeDB) + } + } +} + +// MountTransientStores mounts all transient stores to the provided keys in +// the BaseApp multistore. +func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) { + for _, key := range keys { + app.MountStore(key, storetypes.StoreTypeTransient) + } +} + +// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal +// commit multi-store. +func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) { + skeys := slices.Sorted(maps.Keys(keys)) + for _, key := range skeys { + memKey := keys[key] + app.MountStore(memKey, storetypes.StoreTypeMemory) + } +} + +// MountStore mounts a store to the provided key in the BaseApp multistore, +// using the default DB. 
+func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) { + app.cms.MountStoreWithDB(key, typ, nil) +} + +// LoadLatestVersion loads the latest application version. It will panic if +// called more than once on a running BaseApp. +func (app *BaseApp) LoadLatestVersion() error { + err := app.storeLoader(app.cms) + if err != nil { + return fmt.Errorf("failed to load latest version: %w", err) + } + + return app.Init() +} + +// DefaultStoreLoader will be used by default and loads the latest version +func DefaultStoreLoader(ms storetypes.CommitMultiStore) error { + return ms.LoadLatestVersion() +} + +// CommitMultiStore returns the root multi-store. +// App constructor can use this to access the `cms`. +// UNSAFE: must not be used during the abci life cycle. +func (app *BaseApp) CommitMultiStore() storetypes.CommitMultiStore { + return app.cms +} + +// SnapshotManager returns the snapshot manager. +// application use this to register extra extension snapshotters. +func (app *BaseApp) SnapshotManager() *snapshots.Manager { + return app.snapshotManager +} + +// LoadVersion loads the BaseApp application version. It will panic if called +// more than once on a running baseapp. +func (app *BaseApp) LoadVersion(version int64) error { + app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n") + err := app.cms.LoadVersion(version) + if err != nil { + return fmt.Errorf("failed to load version %d: %w", version, err) + } + + return app.Init() +} + +// LastCommitID returns the last CommitID of the multistore. +func (app *BaseApp) LastCommitID() storetypes.CommitID { + return app.cms.LastCommitID() +} + +// LastBlockHeight returns the last committed block height. +func (app *BaseApp) LastBlockHeight() int64 { + return app.cms.LastCommitID().Version +} + +// ChainID returns the chainID of the app. 
+func (app *BaseApp) ChainID() string { + return app.chainID +} + +// AnteHandler returns the AnteHandler of the app. +func (app *BaseApp) AnteHandler() sdk.AnteHandler { + return app.anteHandler +} + +// Mempool returns the Mempool of the app. +func (app *BaseApp) Mempool() mempool.Mempool { + return app.mempool +} + +// Init initializes the app. It seals the app, preventing any +// further modifications. In addition, it validates the app against +// the earlier provided settings. Returns an error if validation fails. +// nil otherwise. Panics if the app is already sealed. +func (app *BaseApp) Init() error { + if app.sealed { + panic("cannot call initFromMainStore: baseapp already sealed") + } + + if app.cms == nil { + return errors.New("commit multi-store must not be nil") + } + + emptyHeader := cmtproto.Header{ChainID: app.chainID} + + // needed for the export command which inits from store but never calls initchain + app.setState(execModeCheck, emptyHeader) + app.Seal() + + return app.cms.GetPruning().Validate() +} + +func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) { + app.minGasPrices = gasPrices +} + +func (app *BaseApp) setHaltHeight(haltHeight uint64) { + app.haltHeight = haltHeight +} + +func (app *BaseApp) setHaltTime(haltTime uint64) { + app.haltTime = haltTime +} + +func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) { + app.minRetainBlocks = minRetainBlocks +} + +func (app *BaseApp) setInterBlockCache(cache storetypes.MultiStorePersistentCache) { + app.interBlockCache = cache +} + +func (app *BaseApp) setTrace(trace bool) { + app.trace = trace +} + +func (app *BaseApp) setIndexEvents(ie []string) { + app.indexEvents = make(map[string]struct{}) + + for _, e := range ie { + app.indexEvents[e] = struct{}{} + } +} + +// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp. +func (app *BaseApp) Seal() { app.sealed = true } + +// IsSealed returns true if the BaseApp is sealed and false otherwise. 
+func (app *BaseApp) IsSealed() bool { return app.sealed } + +// setState sets the BaseApp's state for the corresponding mode with a branched +// multi-store (i.e. a CacheMultiStore) and a new Context with the same +// multi-store branch, and provided header. +func (app *BaseApp) setState(mode execMode, h cmtproto.Header) { + ms := app.cms.CacheMultiStore() + headerInfo := header.Info{ + Height: h.Height, + Time: h.Time, + ChainID: h.ChainID, + AppHash: h.AppHash, + } + baseState := &state{ + ms: ms, + ctx: sdk.NewContext(ms, h, false, app.logger). + WithStreamingManager(app.streamingManager). + WithHeaderInfo(headerInfo), + } + + switch mode { + case execModeCheck: + baseState.SetContext(baseState.Context().WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices)) + app.checkState = baseState + + case execModePrepareProposal: + app.prepareProposalState = baseState + + case execModeProcessProposal: + app.processProposalState = baseState + + case execModeFinalize: + app.finalizeBlockState = baseState + + default: + panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode)) + } +} + +// SetCircuitBreaker sets the circuit breaker for the BaseApp. +// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not. +func (app *BaseApp) SetCircuitBreaker(cb CircuitBreaker) { + if app.msgServiceRouter == nil { + panic("cannot set circuit breaker with no msg service router set") + } + app.msgServiceRouter.SetCircuit(cb) +} + +// GetConsensusParams returns the current consensus parameters from the BaseApp's +// ParamStore. If the BaseApp has no ParamStore defined, nil is returned. 
+func (app *BaseApp) GetConsensusParams(ctx sdk.Context) cmtproto.ConsensusParams { + if app.paramStore == nil { + return cmtproto.ConsensusParams{} + } + + cp, err := app.paramStore.Get(ctx) + if err != nil { + // This could happen while migrating from v0.45/v0.46 to v0.50, we should + // allow it to happen so during preblock the upgrade plan can be executed + // and the consensus params set for the first time in the new format. + app.logger.Error("failed to get consensus params", "err", err) + return cmtproto.ConsensusParams{} + } + + return cp +} + +// StoreConsensusParams sets the consensus parameters to the BaseApp's param +// store. +// +// NOTE: We're explicitly not storing the CometBFT app_version in the param store. +// It's stored instead in the x/upgrade store, with its own bump logic. +func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error { + if app.paramStore == nil { + return errors.New("cannot store consensus params with no params store set") + } + + return app.paramStore.Set(ctx, cp) +} + +// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers. +func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) { + for _, h := range handlers { + app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware) + } +} + +// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics +// if maximum block gas is less than negative one and returns zero if negative +// one. 
+func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 { + cp := app.GetConsensusParams(ctx) + if cp.Block == nil { + return 0 + } + + maxGas := cp.Block.MaxGas + + switch { + case maxGas < -1: + panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas)) + + case maxGas == -1: + return 0 + + default: + return uint64(maxGas) + } +} + +func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error { + if req.Height < 1 { + return fmt.Errorf("invalid height: %d", req.Height) + } + + lastBlockHeight := app.LastBlockHeight() + + // expectedHeight holds the expected height to validate + var expectedHeight int64 + if lastBlockHeight == 0 && app.initialHeight > 1 { + // In this case, we're validating the first block of the chain, i.e no + // previous commit. The height we're expecting is the initial height. + expectedHeight = app.initialHeight + } else { + // This case can mean two things: + // + // - Either there was already a previous commit in the store, in which + // case we increment the version from there. + // - Or there was no previous commit, in which case we start at version 1. + expectedHeight = lastBlockHeight + 1 + } + + if req.Height != expectedHeight { + return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight) + } + + return nil +} + +// validateBasicTxMsgs executes basic validator calls for messages. 
+func validateBasicTxMsgs(msgs []sdk.Msg) error { + if len(msgs) == 0 { + return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message") + } + + for _, msg := range msgs { + m, ok := msg.(sdk.HasValidateBasic) + if !ok { + continue + } + + if err := m.ValidateBasic(); err != nil { + return err + } + } + + return nil +} + +func (app *BaseApp) getState(mode execMode) *state { + switch mode { + case execModeFinalize: + return app.finalizeBlockState + + case execModePrepareProposal: + return app.prepareProposalState + + case execModeProcessProposal: + return app.processProposalState + + default: + return app.checkState + } +} + +func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter { + if app.disableBlockGasMeter { + return noopGasMeter{} + } + + if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 { + return storetypes.NewGasMeter(maxGas) + } + + return storetypes.NewInfiniteGasMeter() +} + +// retrieve the context for the tx w/ txBytes and other memoized values. +func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context { + app.mu.Lock() + defer app.mu.Unlock() + + modeState := app.getState(mode) + if modeState == nil { + panic(fmt.Sprintf("state is nil for mode %v", mode)) + } + ctx := modeState.Context(). + WithTxBytes(txBytes). + WithGasMeter(storetypes.NewInfiniteGasMeter()) + // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed + + ctx = ctx.WithIsSigverifyTx(app.sigverifyTx) + + ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx)) + + if mode == execModeReCheck { + ctx = ctx.WithIsReCheckTx(true) + } + + if mode == execModeSimulate { + ctx, _ = ctx.CacheContext() + ctx = ctx.WithExecMode(sdk.ExecMode(execModeSimulate)) + } + + return ctx +} + +// cacheTxContext returns a new context based off of the provided context with +// a branched multi-store. 
func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) {
	ms := ctx.MultiStore()
	msCache := ms.CacheMultiStore()
	if msCache.TracingEnabled() {
		// Tag every traced store operation with the tx hash so traces can be
		// correlated back to the transaction being executed.
		msCache = msCache.SetTracingContext(
			storetypes.TraceContext(
				map[string]any{
					"txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)),
				},
			),
		).(storetypes.CacheMultiStore)
	}

	return ctx.WithMultiStore(msCache), msCache
}

// preBlock runs the application's PreBlocker (if set) against the finalize
// state and returns its indexed ABCI events. If the PreBlocker reports that
// consensus parameters changed, the finalize-state context is refreshed with
// the new params and a new block gas meter before being stored back.
func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) ([]abci.Event, error) {
	var events []abci.Event
	if app.preBlocker != nil {
		// Fresh event manager so preBlocker events are isolated from any
		// previously-accumulated events on the finalize context.
		ctx := app.finalizeBlockState.Context().WithEventManager(sdk.NewEventManager())
		rsp, err := app.preBlocker(ctx, req)
		if err != nil {
			return nil, err
		}
		// rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed
		// write the consensus parameters in store to context
		if rsp.ConsensusParamsChanged {
			ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx))
			// GasMeter must be set after we get a context with updated consensus params.
			gasMeter := app.getBlockGasMeter(ctx)
			ctx = ctx.WithBlockGasMeter(gasMeter)
			app.finalizeBlockState.SetContext(ctx)
		}
		events = ctx.EventManager().ABCIEvents()
		events = sdk.MarkEventsToIndex(events, app.indexEvents)
	}
	return events, nil
}

// beginBlock runs the application's (legacy ABCI) BeginBlocker, if set,
// tagging every emitted event with mode=BeginBlock and marking events for
// indexing.
func (app *BaseApp) beginBlock(_ *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) {
	var (
		resp sdk.BeginBlock
		err  error
	)

	if app.beginBlocker != nil {
		resp, err = app.beginBlocker(app.finalizeBlockState.Context())
		if err != nil {
			return resp, err
		}

		// append BeginBlock attributes to all events in the BeginBlock response
		for i, event := range resp.Events {
			resp.Events[i].Attributes = append(
				event.Attributes,
				abci.EventAttribute{Key: "mode", Value: "BeginBlock"},
			)
		}

		resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents)
	}

	return resp, nil
}

// deliverTx executes a single transaction in finalize mode and converts the
// outcome into an ABCI ExecTxResult. Telemetry counters/gauges are recorded
// in a defer so they reflect the final gas info even on failure.
func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult {
	gInfo := sdk.GasInfo{}
	resultStr := "successful"

	var resp *abci.ExecTxResult

	defer func() {
		telemetry.IncrCounter(1, "tx", "count")
		telemetry.IncrCounter(1, "tx", resultStr)
		telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used")
		telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted")
	}()

	gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx, nil)
	if err != nil {
		resultStr = "failed"
		// On failure, only the ante handler's events (if any) are reported.
		resp = sdkerrors.ResponseExecTxResultWithEvents(
			err,
			gInfo.GasWanted,
			gInfo.GasUsed,
			sdk.MarkEventsToIndex(anteEvents, app.indexEvents),
			app.trace,
		)
		return resp
	}

	resp = &abci.ExecTxResult{
		GasWanted: int64(gInfo.GasWanted),
		GasUsed:   int64(gInfo.GasUsed),
		Log:       result.Log,
		Data:      result.Data,
		Events:    sdk.MarkEventsToIndex(result.Events, app.indexEvents),
	}

	return resp
}

// endBlock is an application-defined function that is called after transactions
// have been processed in FinalizeBlock.
func (app *BaseApp) endBlock(_ context.Context) (sdk.EndBlock, error) {
	var endblock sdk.EndBlock

	if app.endBlocker != nil {
		eb, err := app.endBlocker(app.finalizeBlockState.Context())
		if err != nil {
			return endblock, err
		}

		// append EndBlock attributes to all events in the EndBlock response
		for i, event := range eb.Events {
			eb.Events[i].Attributes = append(
				event.Attributes,
				abci.EventAttribute{Key: "mode", Value: "EndBlock"},
			)
		}

		eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents)
		endblock = eb
	}

	return endblock, nil
}

// runTx processes a transaction within a given execution mode, encoded transaction
// bytes, and the decoded transaction itself. All state transitions occur through
// a cached Context depending on the mode provided. State only gets persisted
// if all messages get executed successfully and the execution mode is DeliverTx.
// Note, gas execution info is always returned. A reference to a Result is
// returned if the tx does not run out of gas and if all the messages are valid
// and execute successfully. An error is returned otherwise.
// both txbytes and the decoded tx are passed to runTx to avoid the state machine encoding the tx and decoding the transaction twice
// passing the decoded tx to runTX is optional, it will be decoded if the tx is nil
func (app *BaseApp) runTx(mode execMode, txBytes []byte, tx sdk.Tx) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) {
	// NOTE: GasWanted should be returned by the AnteHandler. GasUsed is
	// determined by the GasMeter. We need access to the context to get the gas
	// meter, so we initialize upfront.
	var gasWanted uint64

	ctx := app.getContextForTx(mode, txBytes)
	ms := ctx.MultiStore()

	// only run the tx if there is block gas remaining
	if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() {
		return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx")
	}

	// Recovery defer: converts panics (e.g. out-of-gas) into errors via the
	// configured recovery middleware and always fills in the final gas info
	// from the tx gas meter, even on the panic path.
	defer func() {
		if r := recover(); r != nil {
			recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware)
			err, result = processRecovery(r, recoveryMW), nil
			ctx.Logger().Error("panic recovered in runTx", "err", err)
		}

		gInfo = sdk.GasInfo{GasWanted: gasWanted, GasUsed: ctx.GasMeter().GasConsumed()}
	}()

	blockGasConsumed := false

	// consumeBlockGas makes sure block gas is consumed at most once. It must
	// happen after tx processing, and must be executed even if tx processing
	// fails. Hence, its execution is deferred.
	consumeBlockGas := func() {
		if !blockGasConsumed {
			blockGasConsumed = true
			ctx.BlockGasMeter().ConsumeGas(
				ctx.GasMeter().GasConsumedToLimit(), "block gas meter",
			)
		}
	}

	// If BlockGasMeter() panics it will be caught by the above recover and will
	// return an error - in any case BlockGasMeter will consume gas past the limit.
	//
	// NOTE: consumeBlockGas must exist in a separate defer function from the
	// general deferred recovery function to recover from consumeBlockGas as it'll
	// be executed first (deferred statements are executed as stack).
	if mode == execModeFinalize {
		defer consumeBlockGas()
	}

	// if the transaction is not decoded, decode it here
	if tx == nil {
		tx, err = app.txDecoder(txBytes)
		if err != nil {
			return sdk.GasInfo{GasUsed: 0, GasWanted: 0}, nil, nil, sdkerrors.ErrTxDecode.Wrap(err.Error())
		}
	}

	msgs := tx.GetMsgs()
	if err := validateBasicTxMsgs(msgs); err != nil {
		return sdk.GasInfo{}, nil, nil, err
	}

	// Reject the tx up-front if any message has no registered handler, before
	// spending any further work on it.
	for _, msg := range msgs {
		handler := app.msgServiceRouter.Handler(msg)
		if handler == nil {
			return sdk.GasInfo{}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
		}
	}

	if app.anteHandler != nil {
		var (
			anteCtx sdk.Context
			msCache storetypes.CacheMultiStore
		)

		// Branch context before AnteHandler call in case it aborts.
		// This is required for both CheckTx and DeliverTx.
		// Ref: https://github.com/cosmos/cosmos-sdk/issues/2772
		//
		// NOTE: Alternatively, we could require that AnteHandler ensures that
		// writes do not happen if aborted/failed. This may have some
		// performance benefits, but it'll be more difficult to get right.
		anteCtx, msCache = app.cacheTxContext(ctx, txBytes)
		anteCtx = anteCtx.WithEventManager(sdk.NewEventManager())
		newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate)

		if !newCtx.IsZero() {
			// At this point, newCtx.MultiStore() is a store branch, or something else
			// replaced by the AnteHandler. We want the original multistore.
			//
			// Also, in the case of the tx aborting, we need to track gas consumed via
			// the instantiated gas meter in the AnteHandler, so we update the context
			// prior to returning.
			ctx = newCtx.WithMultiStore(ms)
		}

		events := ctx.EventManager().Events()

		// GasMeter expected to be set in AnteHandler
		gasWanted = ctx.GasMeter().Limit()

		if err != nil {
			if mode == execModeReCheck {
				// if the ante handler fails on recheck, we want to remove the tx from the mempool
				if mempoolErr := app.mempool.Remove(tx); mempoolErr != nil {
					return gInfo, nil, anteEvents, errors.Join(err, mempoolErr)
				}
			}
			return gInfo, nil, nil, err
		}

		msCache.Write()
		anteEvents = events.ToABCIEvents()
	}

	// Mempool bookkeeping: CheckTx inserts, FinalizeBlock removes (a missing
	// tx is tolerated since it may never have entered this node's mempool).
	switch mode {
	case execModeCheck:
		err = app.mempool.Insert(ctx, tx)
		if err != nil {
			return gInfo, nil, anteEvents, err
		}
	case execModeFinalize:
		err = app.mempool.Remove(tx)
		if err != nil && !errors.Is(err, mempool.ErrTxNotFound) {
			return gInfo, nil, anteEvents,
				fmt.Errorf("failed to remove tx from mempool: %w", err)
		}
	}

	// Create a new Context based off of the existing Context with a MultiStore branch
	// in case message processing fails. At this point, the MultiStore
	// is a branch of a branch.
	runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes)

	// Attempt to execute all messages and only update state if all messages pass
	// and we're in DeliverTx. Note, runMsgs will never return a reference to a
	// Result if any single message fails or does not have a registered Handler.
	msgsV2, err := tx.GetMsgsV2()
	if err == nil {
		result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode)
	}

	// Run optional postHandlers (should run regardless of the execution result).
	//
	// Note: If the postHandler fails, we also revert the runMsgs state.
	if app.postHandler != nil {
		// The runMsgCtx context currently contains events emitted by the ante handler.
		// We clear this to correctly order events without duplicates.
		// Note that the state is still preserved.
		postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager())

		newCtx, errPostHandler := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil)
		if errPostHandler != nil {
			if err == nil {
				// when the msg was handled successfully, return the post handler error only
				return gInfo, nil, anteEvents, errPostHandler
			}
			// otherwise append to the msg error so that we keep the original error code for better user experience
			return gInfo, nil, anteEvents, errorsmod.Wrapf(err, "postHandler: %s", errPostHandler)
		}

		// we don't want runTx to panic if runMsgs has failed earlier
		if result == nil {
			result = &sdk.Result{}
		}
		result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...)
	}

	if err == nil {
		if mode == execModeFinalize {
			// When block gas exceeds, it'll panic and won't commit the cached store.
			consumeBlockGas()

			msCache.Write()
		}

		if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) {
			// append the events in the order of occurrence
			result.Events = append(anteEvents, result.Events...)
		}
	}

	return gInfo, result, anteEvents, err
}

// runMsgs iterates through a list of messages and executes them with the provided
// Context and execution mode. Messages will only be executed during simulation
// and DeliverTx. An error is returned if any single message fails or if a
// Handler does not exist for a given message route. Otherwise, a reference to a
// Result is returned. The caller must not commit state if an error is returned.
func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) {
	events := sdk.EmptyEvents()
	var msgResponses []*codectypes.Any

	// NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter.
	for i, msg := range msgs {
		// Outside Finalize/Simulate, messages are not executed at all.
		if mode != execModeFinalize && mode != execModeSimulate {
			break
		}

		handler := app.msgServiceRouter.Handler(msg)
		if handler == nil {
			return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg)
		}

		// ADR 031 request type routing
		msgResult, err := handler(ctx, msg)
		if err != nil {
			return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i)
		}

		// create message events
		msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i])
		if err != nil {
			return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i)
		}

		// append message events and data
		//
		// Note: Each message result's data must be length-prefixed in order to
		// separate each result.
		for j, event := range msgEvents {
			// append message index to all events
			msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i)))
		}

		events = events.AppendEvents(msgEvents)

		// Each individual sdk.Result that went through the MsgServiceRouter
		// (which should represent 99% of the Msgs now, since everyone should
		// be using protobuf Msgs) has exactly one Msg response, set inside
		// `WrapServiceResult`. We take that Msg response, and aggregate it
		// into an array.
		if len(msgResult.MsgResponses) > 0 {
			msgResponse := msgResult.MsgResponses[0]
			if msgResponse == nil {
				return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg))
			}
			msgResponses = append(msgResponses, msgResponse)
		}

	}

	data, err := makeABCIData(msgResponses)
	if err != nil {
		return nil, errorsmod.Wrap(err, "failed to marshal tx data")
	}

	return &sdk.Result{
		Data:         data,
		Events:       events.ToABCIEvents(),
		MsgResponses: msgResponses,
	}, nil
}

// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx.
func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) {
	return proto.Marshal(&sdk.TxMsgData{MsgResponses: msgResponses})
}

// createEvents returns the events to emit for a single message execution: a
// synthetic "message" event carrying the action (msg type URL), the sender
// (first signer, when resolvable) and — if the handler did not set one — the
// module attribute, followed by the events produced by the handler itself.
func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) {
	eventMsgName := sdk.MsgTypeURL(msg)
	msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName))

	// we set the signer attribute as the sender
	signers, err := cdc.GetMsgV2Signers(msgV2)
	if err != nil {
		return nil, err
	}
	if len(signers) > 0 && signers[0] != nil {
		addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0])
		if err != nil {
			return nil, err
		}
		msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr))
	}

	// verify that events have no module attribute set
	if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found {
		// fall back to the module name derived from the msg type URL, if any
		if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" {
			msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName))
		}
	}

	return sdk.Events{msgEvent}.AppendEvents(events), nil
}

// PrepareProposalVerifyTx performs transaction verification when a proposer is
// creating a block proposal during PrepareProposal. Any state committed to the
// PrepareProposal state internally will be discarded. An error is returned if
// the transaction cannot be encoded or fails verification; otherwise the raw
// encoded transaction bytes are returned.
func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) {
	bz, err := app.txEncoder(tx)
	if err != nil {
		return nil, err
	}

	_, _, _, err = app.runTx(execModePrepareProposal, bz, tx)
	if err != nil {
		return nil, err
	}

	return bz, nil
}

// ProcessProposalVerifyTx performs transaction verification when receiving a
// block proposal during ProcessProposal. Any state committed to the
// ProcessProposal state internally will be discarded. An error is returned if
// the transaction cannot be decoded or fails verification; otherwise the
// decoded transaction is returned.
func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) {
	tx, err := app.txDecoder(txBz)
	if err != nil {
		return nil, err
	}

	_, _, _, err = app.runTx(execModeProcessProposal, txBz, tx)
	if err != nil {
		return nil, err
	}

	return tx, nil
}

// TxDecode decodes raw transaction bytes with the app's configured decoder.
func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) {
	return app.txDecoder(txBytes)
}

// TxEncode encodes a transaction with the app's configured encoder.
func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) {
	return app.txEncoder(tx)
}

// StreamingManager returns the app's configured streaming manager.
func (app *BaseApp) StreamingManager() storetypes.StreamingManager {
	return app.streamingManager
}

// Close is called in start cmd to gracefully cleanup resources.
// It closes the application database and the snapshot manager (when set) and
// joins any errors encountered, so one failure does not mask another.
func (app *BaseApp) Close() error {
	var errs []error

	// Close app.db (opened by cosmos-sdk/server/start.go call to openDB)
	if app.db != nil {
		app.logger.Info("Closing application.db")
		if err := app.db.Close(); err != nil {
			errs = append(errs, err)
		}
	}

	// Close app.snapshotManager
	// - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate)
	// - which calls cosmos-sdk/server/util.go/GetSnapshotStore
	// - which is passed to baseapp/options.go/SetSnapshot
	// - to set app.snapshotManager = snapshots.NewManager
	if app.snapshotManager != nil {
		app.logger.Info("Closing snapshots/metadata.db")
		if err := app.snapshotManager.Close(); err != nil {
			errs = append(errs, err)
		}
	}

	return errors.Join(errs...)
}

// GetBaseApp returns the pointer to itself.
+func (app *BaseApp) GetBaseApp() *BaseApp { + return app +} diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go new file mode 100644 index 0000000..b0fb868 --- /dev/null +++ b/baseapp/baseapp_test.go @@ -0,0 +1,1025 @@ +package baseapp_test + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "math/rand" + "testing" + "time" + + abci "github.com/cometbft/cometbft/abci/types" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/log" + "cosmossdk.io/store/metrics" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/rootmulti" + "cosmossdk.io/store/snapshots" + snapshottypes "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" +) + +var ( + capKey1 = storetypes.NewKVStoreKey("key1") + capKey2 = storetypes.NewKVStoreKey("key2") + + // testTxPriority is the CheckTx priority that we set in the test + // AnteHandler. 
+ testTxPriority = int64(42) +) + +type ( + BaseAppSuite struct { + baseApp *baseapp.BaseApp + cdc *codec.ProtoCodec + txConfig client.TxConfig + logBuffer *bytes.Buffer + } + + SnapshotsConfig struct { + blocks uint64 + blockTxs int + snapshotInterval uint64 + snapshotKeepRecent uint32 + pruningOpts pruningtypes.PruningOptions + } +) + +func NewBaseAppSuite(t *testing.T, opts ...func(*baseapp.BaseApp)) *BaseAppSuite { + t.Helper() + + cdc := codectestutil.CodecOptions{}.NewCodec() + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + db := dbm.NewMemDB() + logBuffer := new(bytes.Buffer) + logger := log.NewLogger(logBuffer, log.ColorOption(false)) + + app := baseapp.NewBaseApp(t.Name(), logger, db, txConfig.TxDecoder(), opts...) + require.Equal(t, t.Name(), app.Name()) + + app.SetInterfaceRegistry(cdc.InterfaceRegistry()) + app.MsgServiceRouter().SetInterfaceRegistry(cdc.InterfaceRegistry()) + app.MountStores(capKey1, capKey2) + app.SetParamStore(paramStore{db: dbm.NewMemDB()}) + app.SetTxDecoder(txConfig.TxDecoder()) + app.SetTxEncoder(txConfig.TxEncoder()) + + // mount stores and seal + require.Nil(t, app.LoadLatestVersion()) + + return &BaseAppSuite{ + baseApp: app, + cdc: cdc, + txConfig: txConfig, + logBuffer: logBuffer, + } +} + +func getQueryBaseapp(t *testing.T) *baseapp.BaseApp { + t.Helper() + + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + return app +} + +func NewBaseAppSuiteWithSnapshots(t *testing.T, cfg SnapshotsConfig, opts ...func(*baseapp.BaseApp)) *BaseAppSuite { + t.Helper() + + snapshotTimeout := 1 * time.Minute + 
snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) + require.NoError(t, err) + + suite := NewBaseAppSuite( + t, + append( + opts, + baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(cfg.snapshotInterval, cfg.snapshotKeepRecent)), + baseapp.SetPruning(cfg.pruningOpts), + )..., + ) + + baseapptestutil.RegisterKeyValueServer(suite.baseApp.MsgServiceRouter(), MsgKeyValueImpl{}) + + _, err = suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + r := rand.New(rand.NewSource(3920758213583)) + keyCounter := 0 + + for height := int64(1); height <= int64(cfg.blocks); height++ { + + _, _, addr := testdata.KeyTestPubAddr() + txs := [][]byte{} + for range cfg.blockTxs { + var msgs []sdk.Msg + for range 100 { + key := fmt.Appendf(nil, "%v", keyCounter) + value := make([]byte, 10000) + + _, err := r.Read(value) + require.NoError(t, err) + + msgs = append(msgs, &baseapptestutil.MsgKeyValue{Key: key, Value: value, Signer: addr.String()}) + keyCounter++ + } + + builder := suite.txConfig.NewTxBuilder() + require.NoError(t, builder.SetMsgs(msgs...)) + setTxSignature(t, builder, 0) + + txBytes, err := suite.txConfig.TxEncoder()(builder.GetTx()) + require.NoError(t, err) + + txs = append(txs, txBytes) + } + + _, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ + Height: height, + Txs: txs, + }) + require.NoError(t, err) + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + // wait for snapshot to be taken, since it happens asynchronously + if cfg.snapshotInterval > 0 && uint64(height)%cfg.snapshotInterval == 0 { + start := time.Now() + for { + if time.Since(start) > snapshotTimeout { + t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) + } + + snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) + require.NoError(t, err) + + if snapshot != nil { + break + } + + time.Sleep(100 * 
time.Millisecond) + } + } + } + + return suite +} + +func TestAnteHandlerGasMeter(t *testing.T) { + // run BeginBlock and assert that the gas meter passed into the first Txn is zeroed out + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + gasMeter := ctx.BlockGasMeter() + require.NotNil(t, gasMeter) + require.Equal(t, storetypes.Gas(0), gasMeter.GasConsumed()) + return ctx, nil + }) + } + // set the beginBlocker to use some gas + beginBlockerOpt := func(bapp *baseapp.BaseApp) { + bapp.SetBeginBlocker(func(ctx sdk.Context) (sdk.BeginBlock, error) { + ctx.BlockGasMeter().ConsumeGas(1, "beginBlocker gas consumption") + return sdk.BeginBlock{}, nil + }) + } + + suite := NewBaseAppSuite(t, anteOpt, beginBlockerOpt) + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + tx := newTxCounter(t, suite.txConfig, 0, 0) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) +} + +func TestLoadVersion(t *testing.T) { + logger := log.NewTestLogger(t) + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + // make a cap key and mount the store + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + appHash := emptyHash[:] + emptyCommitID := storetypes.CommitID{Hash: appHash} + + // fresh store has zero/empty last commit + lastHeight := app.LastBlockHeight() + lastID := 
app.LastCommitID() + require.Equal(t, int64(0), lastHeight) + require.Equal(t, emptyCommitID, lastID) + + // execute a block, collect commit ID + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + commitID1 := storetypes.CommitID{Version: 1, Hash: res.AppHash} + _, err = app.Commit() + require.NoError(t, err) + + // execute a block, collect commit ID + res, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) + require.NoError(t, err) + commitID2 := storetypes.CommitID{Version: 2, Hash: res.AppHash} + _, err = app.Commit() + require.NoError(t, err) + + // reload with LoadLatestVersion + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + app.MountStores() + + err = app.LoadLatestVersion() + require.Nil(t, err) + + testLoadVersionHelper(t, app, int64(2), commitID2) + + // Reload with LoadVersion, see if you can commit the same block and get + // the same result. + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + err = app.LoadVersion(1) + require.Nil(t, err) + + testLoadVersionHelper(t, app, int64(1), commitID1) + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + testLoadVersionHelper(t, app, int64(2), commitID2) +} + +func TestSetLoader(t *testing.T) { + useDefaultLoader := func(app *baseapp.BaseApp) { + app.SetStoreLoader(baseapp.DefaultStoreLoader) + } + + initStore := func(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { + t.Helper() + + rs := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + + key := storetypes.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) + + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, int64(0), rs.LastCommitID().Version) + + // write some data in substore + kv, _ := rs.GetStore(key).(storetypes.KVStore) 
+ require.NotNil(t, kv) + kv.Set(k, v) + + commitID := rs.Commit() + require.Equal(t, int64(1), commitID.Version) + } + + checkStore := func(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { + t.Helper() + + rs := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) + + key := storetypes.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) + + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, ver, rs.LastCommitID().Version) + + // query data in substore + kv, _ := rs.GetStore(key).(storetypes.KVStore) + require.NotNil(t, kv) + require.Equal(t, v, kv.Get(k)) + } + + testCases := map[string]struct { + setLoader func(*baseapp.BaseApp) + origStoreKey string + loadStoreKey string + }{ + "don't set loader": { + origStoreKey: "foo", + loadStoreKey: "foo", + }, + "default loader": { + setLoader: useDefaultLoader, + origStoreKey: "foo", + loadStoreKey: "foo", + }, + } + + k := []byte("key") + v := []byte("value") + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // prepare a db with some data + db := dbm.NewMemDB() + initStore(t, db, tc.origStoreKey, k, v) + + // load the app with the existing db + opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} + if tc.setLoader != nil { + opts = append(opts, tc.setLoader) + } + app := baseapp.NewBaseApp(t.Name(), log.NewTestLogger(t), db, nil, opts...) 
+ app.MountStores(storetypes.NewKVStoreKey(tc.loadStoreKey)) + err := app.LoadLatestVersion() + require.Nil(t, err) + + // "execute" one block + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) + require.NoError(t, err) + require.NotNil(t, res.AppHash) + _, err = app.Commit() + require.NoError(t, err) + + // check db is properly updated + checkStore(t, db, 2, tc.loadStoreKey, k, v) + checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) + }) + } +} + +func TestVersionSetterGetter(t *testing.T) { + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil, pruningOpt) + + require.Equal(t, "", app.Version()) + res, err := app.Query(context.TODO(), &abci.RequestQuery{Path: "app/version"}) + require.NoError(t, err) + require.True(t, res.IsOK()) + require.Equal(t, "", string(res.Value)) + + versionString := "1.0.0" + app.SetVersion(versionString) + require.Equal(t, versionString, app.Version()) + + res, err = app.Query(context.TODO(), &abci.RequestQuery{Path: "app/version"}) + require.NoError(t, err) + require.True(t, res.IsOK()) + require.Equal(t, versionString, string(res.Value)) +} + +func TestLoadVersionInvalid(t *testing.T) { + logger := log.NewNopLogger() + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + err := app.LoadLatestVersion() + require.Nil(t, err) + + // require error when loading an invalid version + err = app.LoadVersion(-1) + require.Error(t, err) + + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + commitID1 := storetypes.CommitID{Version: 1, Hash: res.AppHash} + _, err = app.Commit() + require.NoError(t, err) + + // create a new app with the stores mounted under the same cap key + app = 
baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + // require we can load the latest version + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + + // require error when loading an invalid version + err = app.LoadVersion(2) + require.Error(t, err) +} + +func TestOptionFunction(t *testing.T) { + testChangeNameHelper := func(name string) func(*baseapp.BaseApp) { + return func(bap *baseapp.BaseApp) { + bap.SetName(name) + } + } + + db := dbm.NewMemDB() + bap := baseapp.NewBaseApp("starting name", log.NewTestLogger(t), db, nil, testChangeNameHelper("new name")) + require.Equal(t, bap.Name(), "new name", "BaseApp should have had name changed via option function") +} + +func TestBaseAppOptionSeal(t *testing.T) { + suite := NewBaseAppSuite(t) + + require.Panics(t, func() { + suite.baseApp.SetName("") + }) + require.Panics(t, func() { + suite.baseApp.SetVersion("") + }) + require.Panics(t, func() { + suite.baseApp.SetDB(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetCMS(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetInitChainer(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetPreBlocker(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetBeginBlocker(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetEndBlocker(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetPrepareCheckStater(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetPrecommiter(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetAnteHandler(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetAddrPeerFilter(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetIDPeerFilter(nil) + }) + require.Panics(t, func() { + suite.baseApp.SetFauxMerkleMode() + }) +} + +func TestTxDecoder(t *testing.T) { + cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // patch in TxConfig instead of using an output from x/auth/tx 
+ txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + + tx := newTxCounter(t, txConfig, 1, 0) + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + + dTx, err := txConfig.TxDecoder()(txBytes) + require.NoError(t, err) + + counter, _ := parseTxMemo(t, tx) + dTxCounter, _ := parseTxMemo(t, dTx) + require.Equal(t, counter, dTxCounter) +} + +func TestCustomRunTxPanicHandler(t *testing.T) { + customPanicMsg := "test panic" + anteErr := errorsmod.Register("fakeModule", 100500, "fakeError") + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + panic(errorsmod.Wrap(anteErr, "anteHandler")) + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + suite.baseApp.AddRunTxRecoveryHandler(func(recoveryObj any) error { + err, ok := recoveryObj.(error) + if !ok { + return nil + } + + if anteErr.Is(err) { + panic(customPanicMsg) + } else { + return nil + } + }) + + // transaction should panic with custom handler above + { + tx := newTxCounter(t, suite.txConfig, 0, 0) + + require.PanicsWithValue(t, customPanicMsg, func() { + bz, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{bz}}) + require.Error(t, err) + }) + } +} + +func TestBaseAppAnteHandler(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + suite := NewBaseAppSuite(t, anteOpt) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + _, err := 
suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + // execute a tx that will fail ante handler execution + // + // NOTE: State should not be mutated here. This will be implicitly checked by + // the next txs ante handler execution (anteHandlerTxTest). + tx := newTxCounter(t, suite.txConfig, 0, 0) + tx = setFailOnAnte(t, suite.txConfig, tx, true) + + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.Empty(t, res.Events) + require.False(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) + + ctx := getFinalizeBlockStateCtx(suite.baseApp) + store := ctx.KVStore(capKey1) + require.Equal(t, int64(0), getIntFromStore(t, store, anteKey)) + + // execute at tx that will pass the ante handler (the checkTx state should + // mutate) but will fail the message handler + tx = newTxCounter(t, suite.txConfig, 0, 0) + tx = setFailOnHandler(suite.txConfig, tx, true) + + txBytes, err = suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.Empty(t, res.Events) + require.False(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) + + ctx = getFinalizeBlockStateCtx(suite.baseApp) + store = ctx.KVStore(capKey1) + require.Equal(t, int64(1), getIntFromStore(t, store, anteKey)) + require.Equal(t, int64(0), getIntFromStore(t, store, deliverKey)) + + // Execute a successful ante handler and message execution where state is + // implicitly checked by previous tx executions. 
+ tx = newTxCounter(t, suite.txConfig, 1, 0) + + txBytes, err = suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.NotEmpty(t, res.TxResults[0].Events) + require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) + + ctx = getFinalizeBlockStateCtx(suite.baseApp) + store = ctx.KVStore(capKey1) + require.Equal(t, int64(2), getIntFromStore(t, store, anteKey)) + require.Equal(t, int64(1), getIntFromStore(t, store, deliverKey)) + + _, err = suite.baseApp.Commit() + require.NoError(t, err) +} + +func TestBaseAppPostHandler(t *testing.T) { + postHandlerRun := false + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPostHandler(func(ctx sdk.Context, tx sdk.Tx, simulate, success bool) (newCtx sdk.Context, err error) { + postHandlerRun = true + return ctx, nil + }) + } + + suite := NewBaseAppSuite(t, anteOpt) + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, []byte("foo")}) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + // execute a tx that will fail ante handler execution + // + // NOTE: State should not be mutated here. This will be implicitly checked by + // the next txs ante handler execution (anteHandlerTxTest). 
+ tx := newTxCounter(t, suite.txConfig, 0, 0) + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.Empty(t, res.Events) + require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) + + // PostHandler runs on successful message execution + require.True(t, postHandlerRun) + + // It should also run on failed message execution + postHandlerRun = false + tx = setFailOnHandler(suite.txConfig, tx, true) + txBytes, err = suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + res, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.Empty(t, res.Events) + require.False(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) + + require.True(t, postHandlerRun) + + // regression test, should not panic when runMsgs fails + tx = wonkyMsg(t, suite.txConfig, tx) + txBytes, err = suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.NotContains(t, suite.logBuffer.String(), "panic recovered in runTx") +} + +func TestBaseAppPostHandlerErrorHandling(t *testing.T) { + specs := map[string]struct { + msgHandlerErr error + postHandlerErr error + expCode uint32 + expLog string + }{ + "msg handler ok, post ok": { + expLog: "", + expCode: 0, + }, + "msg handler fails, post ok": { + msgHandlerErr: sdkerrors.ErrUnknownRequest.Wrap("my svc error"), + expCode: sdkerrors.ErrUnknownRequest.ABCICode(), + expLog: "failed to execute message; message index: 0: my svc error: unknown request", + }, + "msg handler ok, post fails": { + postHandlerErr: sdkerrors.ErrInsufficientFunds.Wrap("my post handler error"), + expCode: sdkerrors.ErrInsufficientFunds.ABCICode(), + expLog: "my post handler error: insufficient funds", 
+ }, + "both fail": { + msgHandlerErr: sdkerrors.ErrUnknownRequest.Wrap("my svc error"), + postHandlerErr: sdkerrors.ErrInsufficientFunds.Wrap("my post handler error"), + expCode: sdkerrors.ErrUnknownRequest.ABCICode(), + expLog: "postHandler: my post handler error: insufficient funds: failed to execute message; message index: 0: my svc error: unknown request", + }, + } + for name, spec := range specs { + t.Run(name, func(t *testing.T) { + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetPostHandler(func(ctx sdk.Context, tx sdk.Tx, simulate, success bool) (newCtx sdk.Context, err error) { + return ctx, spec.postHandlerErr + }) + } + suite := NewBaseAppSuite(t, anteOpt) + csMock := mockCounterServer{ + incrementCounterFn: func(ctx context.Context, counter *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + return &baseapptestutil.MsgCreateCounterResponse{}, spec.msgHandlerErr + }, + } + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), csMock) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &cmtproto.ConsensusParams{}, + }) + require.NoError(t, err) + + txBytes, err := suite.txConfig.TxEncoder()(newTxCounter(t, suite.txConfig, 0, 0)) + require.NoError(t, err) + + // when + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + // then + require.NoError(t, err) + assert.Equal(t, spec.expCode, res.TxResults[0].Code) + assert.Equal(t, spec.expLog, res.TxResults[0].Log) + }) + } +} + +// Test and ensure that invalid block heights always cause errors. 
+// See issues: +// - https://github.com/cosmos/cosmos-sdk/issues/11220 +// - https://github.com/cosmos/cosmos-sdk/issues/7662 +func TestABCI_CreateQueryContext(t *testing.T) { + t.Parallel() + app := getQueryBaseapp(t) + + testCases := []struct { + name string + height int64 + headerHeight int64 + prove bool + expErr bool + }{ + {"valid height", 2, 2, true, false}, + {"valid height with different initial height", 2, 1, true, true}, + {"future height", 10, 10, true, true}, + {"negative height, prove=true", -1, -1, true, true}, + {"negative height, prove=false", -1, -1, false, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.headerHeight != tc.height { + _, err := app.InitChain(&abci.RequestInitChain{ + InitialHeight: tc.headerHeight, + }) + require.NoError(t, err) + } + height := tc.height + if tc.height > tc.headerHeight { + height = 0 + } + ctx, err := app.CreateQueryContext(height, tc.prove) + if tc.expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.height, ctx.BlockHeight()) + } + }) + } +} + +func TestABCI_CreateQueryContextWithCheckHeader(t *testing.T) { + t.Parallel() + app := getQueryBaseapp(t) + var height int64 = 2 + var headerHeight int64 = 1 + + testCases := []struct { + checkHeader bool + expErr bool + }{ + {true, true}, + {false, false}, + } + + for _, tc := range testCases { + t.Run("valid height with different initial height", func(t *testing.T) { + _, err := app.InitChain(&abci.RequestInitChain{ + InitialHeight: headerHeight, + }) + require.NoError(t, err) + ctx, err := app.CreateQueryContextWithCheckHeader(0, true, tc.checkHeader) + if tc.expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, height, ctx.BlockHeight()) + } + }) + } +} + +func TestABCI_CreateQueryContext_Before_Set_CheckState(t *testing.T) { + t.Parallel() + + db := dbm.NewMemDB() + name := t.Name() + var height int64 = 2 + var headerHeight int64 = 1 + + 
t.Run("valid height with different initial height", func(t *testing.T) { + app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) + + _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) + require.NoError(t, err) + + var queryCtx *sdk.Context + var queryCtxErr error + app.SetStreamingManager(storetypes.StreamingManager{ + ABCIListeners: []storetypes.ABCIListener{ + &mockABCIListener{ + ListenCommitFn: func(context.Context, abci.ResponseCommit, []*storetypes.StoreKVPair) error { + qCtx, qErr := app.CreateQueryContext(0, true) + queryCtx = &qCtx + queryCtxErr = qErr + return nil + }, + }, + }, + }) + _, err = app.Commit() + require.NoError(t, err) + require.NoError(t, queryCtxErr) + require.Equal(t, height, queryCtx.BlockHeight()) + _, err = app.InitChain(&abci.RequestInitChain{ + InitialHeight: headerHeight, + }) + require.NoError(t, err) + }) +} + +func TestSetMinGasPrices(t *testing.T) { + minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} + suite := NewBaseAppSuite(t, baseapp.SetMinGasPrices(minGasPrices.String())) + + ctx := getCheckStateCtx(suite.baseApp) + require.Equal(t, minGasPrices, ctx.MinGasPrices()) +} + +type ctxType string + +const ( + QueryCtx ctxType = "query" + CheckTxCtx ctxType = "checkTx" +) + +var ctxTypes = []ctxType{QueryCtx, CheckTxCtx} + +func (c ctxType) GetCtx(t *testing.T, bapp *baseapp.BaseApp) sdk.Context { + t.Helper() + switch c { + case QueryCtx: + ctx, err := bapp.CreateQueryContext(1, false) + require.NoError(t, err) + return ctx + case CheckTxCtx: + return getCheckStateCtx(bapp) + } + // TODO: Not supported yet + return getFinalizeBlockStateCtx(bapp) +} + +func TestQueryGasLimit(t *testing.T) { + testCases := []struct { + queryGasLimit uint64 + gasActuallyUsed uint64 + shouldQueryErr bool + }{ + {queryGasLimit: 100, gasActuallyUsed: 50, 
shouldQueryErr: false}, // Valid case + {queryGasLimit: 100, gasActuallyUsed: 150, shouldQueryErr: true}, // gasActuallyUsed > queryGasLimit + {queryGasLimit: 0, gasActuallyUsed: 50, shouldQueryErr: false}, // fuzzing with queryGasLimit = 0 + {queryGasLimit: 0, gasActuallyUsed: 0, shouldQueryErr: false}, // both queryGasLimit and gasActuallyUsed are 0 + {queryGasLimit: 200, gasActuallyUsed: 200, shouldQueryErr: false}, // gasActuallyUsed == queryGasLimit + {queryGasLimit: 100, gasActuallyUsed: 1000, shouldQueryErr: true}, // gasActuallyUsed > queryGasLimit + } + + for _, tc := range testCases { + for _, ctxType := range ctxTypes { + t.Run(fmt.Sprintf("%s: %d - %d", ctxType, tc.queryGasLimit, tc.gasActuallyUsed), func(t *testing.T) { + app := getQueryBaseapp(t) + baseapp.SetQueryGasLimit(tc.queryGasLimit)(app) + ctx := ctxType.GetCtx(t, app) + + // query gas limit should have no effect when CtxType != QueryCtx + if tc.shouldQueryErr && ctxType == QueryCtx { + require.Panics(t, func() { ctx.GasMeter().ConsumeGas(tc.gasActuallyUsed, "test") }) + } else { + require.NotPanics(t, func() { ctx.GasMeter().ConsumeGas(tc.gasActuallyUsed, "test") }) + } + }) + } + } +} + +func TestGetMaximumBlockGas(t *testing.T) { + suite := NewBaseAppSuite(t) + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{}) + require.NoError(t, err) + + ctx := suite.baseApp.NewContext(true) + + require.NoError(t, suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 0}})) + require.Equal(t, uint64(0), suite.baseApp.GetMaximumBlockGas(ctx)) + + require.NoError(t, suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: -1}})) + require.Equal(t, uint64(0), suite.baseApp.GetMaximumBlockGas(ctx)) + + require.NoError(t, suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 5000000}})) + require.Equal(t, uint64(5000000), suite.baseApp.GetMaximumBlockGas(ctx)) + 
+ require.NoError(t, suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: -5000000}})) + require.Panics(t, func() { suite.baseApp.GetMaximumBlockGas(ctx) }) +} + +func TestGetEmptyConsensusParams(t *testing.T) { + suite := NewBaseAppSuite(t) + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{}) + require.NoError(t, err) + ctx := suite.baseApp.NewContext(true) + + cp := suite.baseApp.GetConsensusParams(ctx) + require.Equal(t, cmtproto.ConsensusParams{}, cp) + require.Equal(t, uint64(0), suite.baseApp.GetMaximumBlockGas(ctx)) +} + +func TestLoadVersionPruning(t *testing.T) { + logger := log.NewNopLogger() + pruningOptions := pruningtypes.NewCustomPruningOptions(10, 15) + pruningOpt := baseapp.SetPruning(pruningOptions) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + // make a cap key and mount the store + capKey := storetypes.NewKVStoreKey("key1") + app.MountStores(capKey) + + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + + emptyHash := sha256.Sum256([]byte{}) + emptyCommitID := storetypes.CommitID{ + Hash: emptyHash[:], + } + + // fresh store has zero/empty last commit + lastHeight := app.LastBlockHeight() + lastID := app.LastCommitID() + require.Equal(t, int64(0), lastHeight) + require.Equal(t, emptyCommitID, lastID) + + var lastCommitID storetypes.CommitID + + // Commit seven blocks, of which 7 (latest) is kept in addition to 6, 5 + // (keep recent) and 3 (keep every). 
+ for i := int64(1); i <= 7; i++ { + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: i}) + require.NoError(t, err) + _, err = app.Commit() + require.NoError(t, err) + lastCommitID = storetypes.CommitID{Version: i, Hash: res.AppHash} + } + + for _, v := range []int64{1, 2, 4} { + _, err = app.CommitMultiStore().CacheMultiStoreWithVersion(v) + require.NoError(t, err) + } + + for _, v := range []int64{3, 5, 6, 7} { + _, err = app.CommitMultiStore().CacheMultiStoreWithVersion(v) + require.NoError(t, err) + } + + // reload with LoadLatestVersion, check it loads last version + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + app.MountStores(capKey) + + err = app.LoadLatestVersion() + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(7), lastCommitID) +} diff --git a/baseapp/block_gas_test.go b/baseapp/block_gas_test.go new file mode 100644 index 0000000..8fb200b --- /dev/null +++ b/baseapp/block_gas_test.go @@ -0,0 +1,253 @@ +package baseapp_test + +import ( + "context" + "math" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + cmtjson "github.com/cometbft/cometbft/libs/json" + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/depinject" + "cosmossdk.io/log" + sdkmath "cosmossdk.io/math" + store "cosmossdk.io/store/types" + + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil/configurator" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + txtypes "github.com/cosmos/cosmos-sdk/types/tx" + 
"github.com/cosmos/cosmos-sdk/types/tx/signing" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + xauthsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" +) + +var blockMaxGas = uint64(simtestutil.DefaultConsensusParams.Block.MaxGas) + +type BlockGasImpl struct { + panicTx bool + gasToConsume uint64 + key store.StoreKey +} + +func (m BlockGasImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.KVStore(m.key).Set(msg.Key, msg.Value) + sdkCtx.GasMeter().ConsumeGas(m.gasToConsume, "TestMsg") + if m.panicTx { + panic("panic in tx execution") + } + return &baseapptestutil.MsgCreateKeyValueResponse{}, nil +} + +func TestBaseApp_BlockGas(t *testing.T) { + testcases := []struct { + name string + gasToConsume uint64 // gas to consume in the msg execution + panicTx bool // panic explicitly in tx execution + expErr bool + }{ + {"less than block gas meter", 10, false, false}, + {"more than block gas meter", blockMaxGas, false, true}, + {"more than block gas meter", uint64(float64(blockMaxGas) * 1.2), false, true}, + {"consume MaxUint64", math.MaxUint64, true, true}, + {"consume MaxGasWanted", txtypes.MaxGasWanted, false, true}, + {"consume block gas when panicked", 10, true, true}, + } + + for _, tc := range testcases { + var ( + bankKeeper bankkeeper.Keeper + accountKeeper authkeeper.AccountKeeper + appBuilder *runtime.AppBuilder + txConfig client.TxConfig + cdc codec.Codec + interfaceRegistry codectypes.InterfaceRegistry + err error + ) + + err = depinject.Inject( + depinject.Configs( + configurator.NewAppConfig( + configurator.AuthModule(), + configurator.TxModule(), + configurator.ConsensusModule(), + configurator.BankModule(), + configurator.StakingModule(), + ), + 
depinject.Supply(log.NewNopLogger()), + ), + &bankKeeper, + &accountKeeper, + &interfaceRegistry, + &txConfig, + &cdc, + &appBuilder) + require.NoError(t, err) + + bapp := appBuilder.Build(dbm.NewMemDB(), nil) + err = bapp.Load(true) + require.NoError(t, err) + + t.Run(tc.name, func(t *testing.T) { + baseapptestutil.RegisterInterfaces(interfaceRegistry) + baseapptestutil.RegisterKeyValueServer(bapp.MsgServiceRouter(), BlockGasImpl{ + panicTx: tc.panicTx, + gasToConsume: tc.gasToConsume, + key: bapp.UnsafeFindStoreKey(banktypes.ModuleName), + }) + + genState := GenesisStateWithSingleValidator(t, cdc, appBuilder) + stateBytes, err := cmtjson.MarshalIndent(genState, "", " ") + require.NoError(t, err) + _, err = bapp.InitChain(&abci.RequestInitChain{ + Validators: []abci.ValidatorUpdate{}, + ConsensusParams: simtestutil.DefaultConsensusParams, + AppStateBytes: stateBytes, + }) + + require.NoError(t, err) + ctx := bapp.NewContext(false) + + // tx fee + feeCoin := sdk.NewCoin("atom", sdkmath.NewInt(150)) + feeAmount := sdk.NewCoins(feeCoin) + + // test account and fund + priv1, _, addr1 := testdata.KeyTestPubAddr() + err = bankKeeper.MintCoins(ctx, minttypes.ModuleName, feeAmount) + require.NoError(t, err) + err = bankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, addr1, feeAmount) + require.NoError(t, err) + require.Equal(t, feeCoin.Amount, bankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount) + seq := accountKeeper.GetAccount(ctx, addr1).GetSequence() + require.Equal(t, uint64(0), seq) + + // msg and signatures + msg := &baseapptestutil.MsgKeyValue{ + Key: []byte("ok"), + Value: []byte("ok"), + Signer: addr1.String(), + } + + txBuilder := txConfig.NewTxBuilder() + + require.NoError(t, txBuilder.SetMsgs(msg)) + txBuilder.SetFeeAmount(feeAmount) + txBuilder.SetGasLimit(uint64(simtestutil.DefaultConsensusParams.Block.MaxGas)) + + senderAccountNumber := accountKeeper.GetAccount(ctx, addr1).GetAccountNumber() + privs, accNums, accSeqs := 
[]cryptotypes.PrivKey{priv1}, []uint64{senderAccountNumber}, []uint64{0} + _, txBytes, err := createTestTx(txConfig, txBuilder, privs, accNums, accSeqs, ctx.ChainID()) + require.NoError(t, err) + + rsp, err := bapp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + + // check result + ctx = bapp.GetContextForFinalizeBlock(txBytes) + okValue := ctx.KVStore(bapp.UnsafeFindStoreKey(banktypes.ModuleName)).Get([]byte("ok")) + + if tc.expErr { + if tc.panicTx { + require.Equal(t, sdkerrors.ErrPanic.ABCICode(), rsp.TxResults[0].Code) + } else { + require.Equal(t, sdkerrors.ErrOutOfGas.ABCICode(), rsp.TxResults[0].Code) + } + require.Empty(t, okValue) + } else { + require.Equal(t, uint32(0), rsp.TxResults[0].Code) + require.Equal(t, []byte("ok"), okValue) + } + // check block gas is always consumed + baseGas := uint64(57504) // baseGas is the gas consumed before tx msg + expGasConsumed := min(addUint64Saturating(tc.gasToConsume, baseGas), uint64(simtestutil.DefaultConsensusParams.Block.MaxGas)) + require.Equal(t, int(expGasConsumed), int(ctx.BlockGasMeter().GasConsumed())) + // tx fee is always deducted + require.Equal(t, int64(0), bankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount.Int64()) + // sender's sequence is always increased + seq = accountKeeper.GetAccount(ctx, addr1).GetSequence() + require.NoError(t, err) + require.Equal(t, uint64(1), seq) + }) + } +} + +func createTestTx(txConfig client.TxConfig, txBuilder client.TxBuilder, privs []cryptotypes.PrivKey, accNums, accSeqs []uint64, chainID string) (xauthsigning.Tx, []byte, error) { + defaultSignMode, err := xauthsigning.APISignModeToInternal(txConfig.SignModeHandler().DefaultMode()) + if err != nil { + return nil, nil, err + } + // First round: we gather all the signer infos. We use the "set empty + // signature" hack to do that. 
+ var sigsV2 []signing.SignatureV2 + for i, priv := range privs { + sigV2 := signing.SignatureV2{ + PubKey: priv.PubKey(), + Data: &signing.SingleSignatureData{ + SignMode: defaultSignMode, + Signature: nil, + }, + Sequence: accSeqs[i], + } + + sigsV2 = append(sigsV2, sigV2) + } + err = txBuilder.SetSignatures(sigsV2...) + if err != nil { + return nil, nil, err + } + + // Second round: all signer infos are set, so each signer can sign. + sigsV2 = []signing.SignatureV2{} + for i, priv := range privs { + signerData := xauthsigning.SignerData{ + Address: sdk.AccAddress(priv.PubKey().Bytes()).String(), + ChainID: chainID, + AccountNumber: accNums[i], + Sequence: accSeqs[i], + PubKey: priv.PubKey(), + } + sigV2, err := tx.SignWithPrivKey( + context.TODO(), defaultSignMode, signerData, + txBuilder, priv, txConfig, accSeqs[i]) + if err != nil { + return nil, nil, err + } + + sigsV2 = append(sigsV2, sigV2) + } + err = txBuilder.SetSignatures(sigsV2...) + if err != nil { + return nil, nil, err + } + + txBytes, err := txConfig.TxEncoder()(txBuilder.GetTx()) + if err != nil { + return nil, nil, err + } + + return txBuilder.GetTx(), txBytes, nil +} + +func addUint64Saturating(a, b uint64) uint64 { + if math.MaxUint64-a < b { + return math.MaxUint64 + } + + return a + b +} diff --git a/baseapp/circuit.go b/baseapp/circuit.go new file mode 100644 index 0000000..d0b9ee5 --- /dev/null +++ b/baseapp/circuit.go @@ -0,0 +1,8 @@ +package baseapp + +import "context" + +// CircuitBreaker is an interface that defines the methods for a circuit breaker. 
+type CircuitBreaker interface { + IsAllowed(ctx context.Context, typeURL string) (bool, error) +} diff --git a/baseapp/genesis.go b/baseapp/genesis.go new file mode 100644 index 0000000..8002f75 --- /dev/null +++ b/baseapp/genesis.go @@ -0,0 +1,23 @@ +package baseapp + +import ( + "errors" + + "github.com/cometbft/cometbft/abci/types" + + "cosmossdk.io/core/genesis" +) + +var _ genesis.TxHandler = (*BaseApp)(nil) + +// ExecuteGenesisTx implements genesis.GenesisState from +// cosmossdk.io/core/genesis to set initial state in genesis +func (ba *BaseApp) ExecuteGenesisTx(tx []byte) error { + res := ba.deliverTx(tx) + + if res.Code != types.CodeTypeOK { + return errors.New(res.Log) + } + + return nil +} diff --git a/baseapp/grpcrouter.go b/baseapp/grpcrouter.go new file mode 100644 index 0000000..b9ccafd --- /dev/null +++ b/baseapp/grpcrouter.go @@ -0,0 +1,159 @@ +package baseapp + +import ( + "context" + "fmt" + + abci "github.com/cometbft/cometbft/abci/types" + gogogrpc "github.com/cosmos/gogoproto/grpc" + "google.golang.org/grpc" + "google.golang.org/grpc/encoding" + "google.golang.org/protobuf/runtime/protoiface" + + "github.com/cosmos/cosmos-sdk/baseapp/internal/protocompat" + "github.com/cosmos/cosmos-sdk/client/grpc/reflection" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GRPCQueryRouter routes ABCI Query requests to GRPC handlers +type GRPCQueryRouter struct { + // routes maps query handlers used in ABCIQuery. + routes map[string]GRPCQueryHandler + // hybridHandlers maps the request name to the handler. It is a hybrid handler which seamlessly + // handles both gogo and protov2 messages. + hybridHandlers map[string][]func(ctx context.Context, req, resp protoiface.MessageV1) error + // binaryCodec is used to encode/decode binary protobuf messages. + binaryCodec codec.BinaryCodec + // cdc is the gRPC codec used by the router to correctly unmarshal messages. 
+ cdc encoding.Codec + // serviceData contains the gRPC services and their handlers. + serviceData []serviceData +} + +// serviceData represents a gRPC service, along with its handler. +type serviceData struct { + serviceDesc *grpc.ServiceDesc + handler any +} + +var _ gogogrpc.Server = &GRPCQueryRouter{} + +// NewGRPCQueryRouter creates a new GRPCQueryRouter +func NewGRPCQueryRouter() *GRPCQueryRouter { + return &GRPCQueryRouter{ + routes: map[string]GRPCQueryHandler{}, + hybridHandlers: map[string][]func(ctx context.Context, req, resp protoiface.MessageV1) error{}, + } +} + +// GRPCQueryHandler defines a function type which handles ABCI Query requests +// using gRPC +type GRPCQueryHandler = func(ctx sdk.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) + +// Route returns the GRPCQueryHandler for a given query route path or nil +// if not found +func (qrt *GRPCQueryRouter) Route(path string) GRPCQueryHandler { + handler, found := qrt.routes[path] + if !found { + return nil + } + return handler +} + +// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC +// service description, handler is an object which implements that gRPC service/ +// +// This functions PANICS: +// - if a protobuf service is registered twice. 
+func (qrt *GRPCQueryRouter) RegisterService(sd *grpc.ServiceDesc, handler any) { + // adds a top-level query handler based on the gRPC service name + for _, method := range sd.Methods { + err := qrt.registerABCIQueryHandler(sd, method, handler) + if err != nil { + panic(err) + } + err = qrt.registerHybridHandler(sd, method, handler) + if err != nil { + panic(err) + } + } + + qrt.serviceData = append(qrt.serviceData, serviceData{ + serviceDesc: sd, + handler: handler, + }) +} + +func (qrt *GRPCQueryRouter) registerABCIQueryHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error { + fqName := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName) + methodHandler := method.Handler + + // Check that each service is only registered once. If a service is + // registered more than once, then we should error. Since we can't + // return an error (`Server.RegisterService` interface restriction) we + // panic (at startup). + _, found := qrt.routes[fqName] + if found { + return fmt.Errorf( + "gRPC query service %s has already been registered. Please make sure to only register each service once. 
"+ + "This usually means that there are conflicting modules registering the same gRPC query service", + fqName, + ) + } + + qrt.routes[fqName] = func(ctx sdk.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { + // call the method handler from the service description with the handler object, + // a wrapped sdk.Context with proto-unmarshaled data from the ABCI request data + res, err := methodHandler(handler, ctx, func(i any) error { + return qrt.cdc.Unmarshal(req.Data, i) + }, nil) + if err != nil { + return nil, err + } + + // proto marshal the result bytes + var resBytes []byte + resBytes, err = qrt.cdc.Marshal(res) + if err != nil { + return nil, err + } + + // return the result bytes as the response value + return &abci.ResponseQuery{ + Height: req.Height, + Value: resBytes, + }, nil + } + return nil +} + +func (qrt *GRPCQueryRouter) HybridHandlerByRequestName(name string) []func(ctx context.Context, req, resp protoiface.MessageV1) error { + return qrt.hybridHandlers[name] +} + +func (qrt *GRPCQueryRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error { + // extract message name from method descriptor + inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method) + if err != nil { + return err + } + methodHandler, err := protocompat.MakeHybridHandler(qrt.binaryCodec, sd, method, handler) + if err != nil { + return err + } + qrt.hybridHandlers[string(inputName)] = append(qrt.hybridHandlers[string(inputName)], methodHandler) + return nil +} + +// SetInterfaceRegistry sets the interface registry for the router. This will +// also register the interface reflection gRPC service. 
+func (qrt *GRPCQueryRouter) SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) { + // instantiate the codec + qrt.cdc = codec.NewProtoCodec(interfaceRegistry).GRPCCodec() + qrt.binaryCodec = codec.NewProtoCodec(interfaceRegistry) + // Once we have an interface registry, we can register the interface + // registry reflection gRPC service. + reflection.RegisterReflectionServiceServer(qrt, reflection.NewReflectionServiceServer(interfaceRegistry)) +} diff --git a/baseapp/grpcrouter_helpers.go b/baseapp/grpcrouter_helpers.go new file mode 100644 index 0000000..60d3fd9 --- /dev/null +++ b/baseapp/grpcrouter_helpers.go @@ -0,0 +1,64 @@ +package baseapp + +import ( + gocontext "context" + "fmt" + + abci "github.com/cometbft/cometbft/abci/types" + gogogrpc "github.com/cosmos/gogoproto/grpc" + "google.golang.org/grpc" + + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// QueryServiceTestHelper provides a helper for making grpc query service +// rpc calls in unit tests. It implements both the grpc Server and ClientConn +// interfaces needed to register a query service server and create a query +// service client. 
+type QueryServiceTestHelper struct { + *GRPCQueryRouter + Ctx sdk.Context +} + +var ( + _ gogogrpc.Server = &QueryServiceTestHelper{} + _ gogogrpc.ClientConn = &QueryServiceTestHelper{} +) + +// NewQueryServerTestHelper creates a new QueryServiceTestHelper that wraps +// the provided sdk.Context +func NewQueryServerTestHelper(ctx sdk.Context, interfaceRegistry types.InterfaceRegistry) *QueryServiceTestHelper { + qrt := NewGRPCQueryRouter() + qrt.SetInterfaceRegistry(interfaceRegistry) + return &QueryServiceTestHelper{GRPCQueryRouter: qrt, Ctx: ctx} +} + +// Invoke implements the grpc ClientConn.Invoke method +func (q *QueryServiceTestHelper) Invoke(_ gocontext.Context, method string, args, reply any, _ ...grpc.CallOption) error { + querier := q.Route(method) + if querier == nil { + return fmt.Errorf("handler not found for %s", method) + } + reqBz, err := q.cdc.Marshal(args) + if err != nil { + return err + } + + res, err := querier(q.Ctx, &abci.RequestQuery{Data: reqBz}) + if err != nil { + return err + } + + err = q.cdc.Unmarshal(res.Value, reply) + if err != nil { + return err + } + + return nil +} + +// NewStream implements the grpc ClientConn.NewStream method +func (q *QueryServiceTestHelper) NewStream(gocontext.Context, *grpc.StreamDesc, string, ...grpc.CallOption) (grpc.ClientStream, error) { + return nil, fmt.Errorf("not supported") +} diff --git a/baseapp/grpcrouter_test.go b/baseapp/grpcrouter_test.go new file mode 100644 index 0000000..63d8499 --- /dev/null +++ b/baseapp/grpcrouter_test.go @@ -0,0 +1,224 @@ +package baseapp_test + +import ( + "context" + "sync" + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/depinject" + "cosmossdk.io/log" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb" + sdk 
"github.com/cosmos/cosmos-sdk/types" +) + +func TestGRPCQueryRouter(t *testing.T) { + qr := baseapp.NewGRPCQueryRouter() + interfaceRegistry := testdata.NewTestInterfaceRegistry() + qr.SetInterfaceRegistry(interfaceRegistry) + testdata_pulsar.RegisterQueryServer(qr, testdata_pulsar.QueryImpl{}) + helper := &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: qr, + Ctx: sdk.Context{}.WithContext(context.Background()), + } + client := testdata.NewQueryClient(helper) + + res, err := client.Echo(context.Background(), &testdata.EchoRequest{Message: "hello"}) + require.Nil(t, err) + require.NotNil(t, res) + require.Equal(t, "hello", res.Message) + + res, err = client.Echo(context.Background(), nil) + require.Nil(t, err) + require.Empty(t, res.Message) + + res2, err := client.SayHello(context.Background(), &testdata.SayHelloRequest{Name: "Foo"}) + require.Nil(t, err) + require.NotNil(t, res) + require.Equal(t, "Hello Foo!", res2.Greeting) + + spot := &testdata.Dog{Name: "Spot", Size_: "big"} + any, err := types.NewAnyWithValue(spot) + require.NoError(t, err) + res3, err := client.TestAny(context.Background(), &testdata.TestAnyRequest{AnyAnimal: any}) + require.NoError(t, err) + require.NotNil(t, res3) + require.Equal(t, spot, res3.HasAnimal.Animal.GetCachedValue()) +} + +func TestGRPCRouterHybridHandlers(t *testing.T) { + assertRouterBehaviour := func(helper *baseapp.QueryServiceTestHelper) { + // test getting the handler by name + handlers := helper.HybridHandlerByRequestName("testpb.EchoRequest") + require.NotNil(t, handlers) + require.Len(t, handlers, 1) + handler := handlers[0] + // sending a protov2 message should work, and return a protov2 message + v2Resp := new(testdata_pulsar.EchoResponse) + err := handler(helper.Ctx, &testdata_pulsar.EchoRequest{Message: "hello"}, v2Resp) + require.Nil(t, err) + require.Equal(t, "hello", v2Resp.Message) + // also sending a protov1 message should work, and return a gogoproto message + gogoResp := new(testdata.EchoResponse) + err = 
handler(helper.Ctx, &testdata.EchoRequest{Message: "hello"}, gogoResp) + require.NoError(t, err) + require.Equal(t, "hello", gogoResp.Message) + } + + t.Run("protov2 server", func(t *testing.T) { + qr := baseapp.NewGRPCQueryRouter() + interfaceRegistry := testdata.NewTestInterfaceRegistry() + qr.SetInterfaceRegistry(interfaceRegistry) + testdata_pulsar.RegisterQueryServer(qr, testdata_pulsar.QueryImpl{}) + helper := &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: qr, + Ctx: sdk.Context{}.WithContext(context.Background()), + } + assertRouterBehaviour(helper) + }) + + t.Run("gogoproto server", func(t *testing.T) { + qr := baseapp.NewGRPCQueryRouter() + interfaceRegistry := testdata.NewTestInterfaceRegistry() + qr.SetInterfaceRegistry(interfaceRegistry) + testdata.RegisterQueryServer(qr, testdata.QueryImpl{}) + helper := &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: qr, + Ctx: sdk.Context{}.WithContext(context.Background()), + } + assertRouterBehaviour(helper) + }) +} + +func TestRegisterQueryServiceTwice(t *testing.T) { + // Setup baseapp. + var appBuilder *runtime.AppBuilder + err := depinject.Inject( + depinject.Configs( + makeMinimalConfig(), + depinject.Supply(log.NewTestLogger(t)), + ), + &appBuilder) + require.NoError(t, err) + db := dbm.NewMemDB() + app := appBuilder.Build(db, nil) + + // First time registering service shouldn't panic. + require.NotPanics(t, func() { + testdata.RegisterQueryServer( + app.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + }) + + // Second time should panic. + require.Panics(t, func() { + testdata.RegisterQueryServer( + app.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + }) +} + +// Tests that we don't have data races per +// https://github.com/cosmos/cosmos-sdk/issues/10324 +// but with the same client connection being used concurrently. 
+func TestQueryDataRaces_sameConnectionToSameHandler(t *testing.T) { + var mu sync.Mutex + var helper *baseapp.QueryServiceTestHelper + makeClientConn := func(qr *baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper { + mu.Lock() + defer mu.Unlock() + + if helper == nil { + helper = &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: qr, + Ctx: sdk.Context{}.WithContext(context.Background()), + } + } + return helper + } + testQueryDataRacesSameHandler(t, makeClientConn) +} + +// Tests that we don't have data races per +// https://github.com/cosmos/cosmos-sdk/issues/10324 +// but with unique client connections requesting from the same handler concurrently. +func TestQueryDataRaces_uniqueConnectionsToSameHandler(t *testing.T) { + // Return a new handler for every single call. + testQueryDataRacesSameHandler(t, func(qr *baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper { + return &baseapp.QueryServiceTestHelper{ + GRPCQueryRouter: qr, + Ctx: sdk.Context{}.WithContext(context.Background()), + } + }) +} + +func testQueryDataRacesSameHandler(t *testing.T, makeClientConn func(*baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper) { + t.Helper() + t.Parallel() + + qr := baseapp.NewGRPCQueryRouter() + interfaceRegistry := testdata.NewTestInterfaceRegistry() + qr.SetInterfaceRegistry(interfaceRegistry) + testdata.RegisterQueryServer(qr, testdata.QueryImpl{}) + + // The goal is to invoke the router concurrently and check for any data races. + // 0. Run with: go test -race + // 1. Synchronize every one of the 1,000 goroutines waiting to all query at the + // same time. + // 2. Once the greenlight is given, perform a query through the router. + var wg sync.WaitGroup + defer wg.Wait() + + greenlight := make(chan bool) + n := 1000 + ready := make(chan bool, n) + go func() { + for range n { + <-ready + } + close(greenlight) + }() + + for range n { + wg.Add(1) + go func() { + defer wg.Done() + + // Wait until we get the green light to start. 
+ ready <- true + <-greenlight + + client := testdata.NewQueryClient(makeClientConn(qr)) + res, err := client.Echo(context.Background(), &testdata.EchoRequest{Message: "hello"}) + require.Nil(t, err) + require.NotNil(t, res) + require.Equal(t, "hello", res.Message) + + res, err = client.Echo(context.Background(), nil) + require.Nil(t, err) + require.Empty(t, res.Message) + + res2, err := client.SayHello(context.Background(), &testdata.SayHelloRequest{Name: "Foo"}) + require.Nil(t, err) + require.NotNil(t, res) + require.Equal(t, "Hello Foo!", res2.Greeting) + + spot := &testdata.Dog{Name: "Spot", Size_: "big"} + any, err := types.NewAnyWithValue(spot) + require.NoError(t, err) + res3, err := client.TestAny(context.Background(), &testdata.TestAnyRequest{AnyAnimal: any}) + require.NoError(t, err) + require.NotNil(t, res3) + require.Equal(t, spot, res3.HasAnimal.Animal.GetCachedValue()) + }() + } +} diff --git a/baseapp/grpcserver.go b/baseapp/grpcserver.go new file mode 100644 index 0000000..475fd42 --- /dev/null +++ b/baseapp/grpcserver.go @@ -0,0 +1,128 @@ +package baseapp + +import ( + "context" + "fmt" + "strconv" + + gogogrpc "github.com/cosmos/gogoproto/grpc" + grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" + grpcrecovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + errorsmod "cosmossdk.io/errors" + storetypes "cosmossdk.io/store/types" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + grpctypes "github.com/cosmos/cosmos-sdk/types/grpc" +) + +// RegisterGRPCServer registers gRPC services directly with the gRPC server. 
+func (app *BaseApp) RegisterGRPCServer(server gogogrpc.Server) { + app.RegisterGRPCServerWithSkipCheckHeader(server, false) +} + +// RegisterGRPCServerWithSkipCheckHeader registers gRPC services with the specified gRPC server +// and bypass check header flag. During the commit phase, gRPC queries may be processed before the block header +// is fully updated, causing header checks to fail erroneously. Skipping the header check in these cases prevents +// false negatives and ensures more robust query handling. While bypassing the header check is generally preferred to avoid false +// negatives during the commit phase, there are niche scenarios where someone might want to enable it. +// For instance, if an application requires strict validation to ensure that the query context exactly +// reflects the expected block header (for consistency or security reasons), then enabling header checks +// could be beneficial. However, this strictness comes at the cost of potentially more frequent errors +// when queries occur during the commit phase. +func (app *BaseApp) RegisterGRPCServerWithSkipCheckHeader(server gogogrpc.Server, skipCheckHeader bool) { + // Define an interceptor for all gRPC queries: this interceptor will create + // a new sdk.Context, and pass it into the query handler. + interceptor := func(grpcCtx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + // If there's some metadata in the context, retrieve it. + md, ok := metadata.FromIncomingContext(grpcCtx) + if !ok { + return nil, status.Error(codes.Internal, "unable to retrieve metadata") + } + + // Get height header from the request context, if present. 
+ var height int64 + if heightHeaders := md.Get(grpctypes.GRPCBlockHeightHeader); len(heightHeaders) == 1 { + height, err = strconv.ParseInt(heightHeaders[0], 10, 64) + if err != nil { + return nil, errorsmod.Wrapf( + sdkerrors.ErrInvalidRequest, + "Baseapp.RegisterGRPCServer: invalid height header %q: %v", grpctypes.GRPCBlockHeightHeader, err) + } + if err := checkNegativeHeight(height); err != nil { + return nil, err + } + } + + // Create the sdk.Context. Passing false as 2nd arg, as we can't + // actually support proofs with gRPC right now. + sdkCtx, err := app.CreateQueryContextWithCheckHeader(height, false, !skipCheckHeader) + if err != nil { + return nil, err + } + + // Add relevant gRPC headers + if height == 0 { + height = sdkCtx.BlockHeight() // If height was not set in the request, set it to the latest + } + + // Attach the sdk.Context into the gRPC's context.Context. + grpcCtx = context.WithValue(grpcCtx, sdk.SdkContextKey, sdkCtx) + + md = metadata.Pairs(grpctypes.GRPCBlockHeightHeader, strconv.FormatInt(height, 10)) + if err = grpc.SetHeader(grpcCtx, md); err != nil { + app.logger.Error("failed to set gRPC header", "err", err) + } + + app.logger.Debug("gRPC query received", "type", fmt.Sprintf("%#v", req)) + + // Catch an OutOfGasPanic caused in the query handlers + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case storetypes.ErrorOutOfGas: + err = errorsmod.Wrapf(sdkerrors.ErrOutOfGas, "Query gas limit exceeded: %v, out of gas in location: %v", sdkCtx.GasMeter().Limit(), rType.Descriptor) + default: + panic(r) + } + } + }() + + return handler(grpcCtx, req) + } + + // Loop through all services and methods, add the interceptor, and register + // the service. 
+ for _, data := range app.GRPCQueryRouter().serviceData { + desc := data.serviceDesc + newMethods := make([]grpc.MethodDesc, len(desc.Methods)) + + for i, method := range desc.Methods { + methodHandler := method.Handler + newMethods[i] = grpc.MethodDesc{ + MethodName: method.MethodName, + Handler: func(srv any, ctx context.Context, dec func(any) error, _ grpc.UnaryServerInterceptor) (any, error) { + return methodHandler(srv, ctx, dec, grpcmiddleware.ChainUnaryServer( + grpcrecovery.UnaryServerInterceptor(), + interceptor, + )) + }, + } + } + + newDesc := &grpc.ServiceDesc{ + ServiceName: desc.ServiceName, + HandlerType: desc.HandlerType, + Methods: newMethods, + Streams: desc.Streams, + Metadata: desc.Metadata, + } + + server.RegisterService(newDesc, data.handler) + } +} diff --git a/baseapp/info.go b/baseapp/info.go new file mode 100644 index 0000000..96b4ab5 --- /dev/null +++ b/baseapp/info.go @@ -0,0 +1,211 @@ +package baseapp + +import ( + "time" + + abci "github.com/cometbft/cometbft/abci/types" + + "cosmossdk.io/core/comet" +) + +// NewBlockInfo returns a new BlockInfo instance +// This function should be only used in tests +func NewBlockInfo( + misbehavior []abci.Misbehavior, + validatorsHash []byte, + proposerAddress []byte, + lastCommit abci.CommitInfo, +) comet.BlockInfo { + return &cometInfo{ + Misbehavior: misbehavior, + ValidatorsHash: validatorsHash, + ProposerAddress: proposerAddress, + LastCommit: lastCommit, + } +} + +// CometInfo defines the properties provided by comet to the application +type cometInfo struct { + Misbehavior []abci.Misbehavior + ValidatorsHash []byte + ProposerAddress []byte + LastCommit abci.CommitInfo +} + +func (r cometInfo) GetEvidence() comet.EvidenceList { + return evidenceWrapper{evidence: r.Misbehavior} +} + +func (r cometInfo) GetValidatorsHash() []byte { + return r.ValidatorsHash +} + +func (r cometInfo) GetProposerAddress() []byte { + return r.ProposerAddress +} + +func (r cometInfo) GetLastCommit() comet.CommitInfo 
{ + return commitInfoWrapper{r.LastCommit} +} + +type evidenceWrapper struct { + evidence []abci.Misbehavior +} + +func (e evidenceWrapper) Len() int { + return len(e.evidence) +} + +func (e evidenceWrapper) Get(i int) comet.Evidence { + return misbehaviorWrapper{e.evidence[i]} +} + +// commitInfoWrapper is a wrapper around abci.CommitInfo that implements CommitInfo interface +type commitInfoWrapper struct { + abci.CommitInfo +} + +var _ comet.CommitInfo = (*commitInfoWrapper)(nil) + +func (c commitInfoWrapper) Round() int32 { + return c.CommitInfo.Round +} + +func (c commitInfoWrapper) Votes() comet.VoteInfos { + return abciVoteInfoWrapper{c.CommitInfo.Votes} +} + +// abciVoteInfoWrapper is a wrapper around abci.VoteInfo that implements VoteInfos interface +type abciVoteInfoWrapper struct { + votes []abci.VoteInfo +} + +var _ comet.VoteInfos = (*abciVoteInfoWrapper)(nil) + +func (e abciVoteInfoWrapper) Len() int { + return len(e.votes) +} + +func (e abciVoteInfoWrapper) Get(i int) comet.VoteInfo { + return voteInfoWrapper{e.votes[i]} +} + +// voteInfoWrapper is a wrapper around abci.VoteInfo that implements VoteInfo interface +type voteInfoWrapper struct { + abci.VoteInfo +} + +var _ comet.VoteInfo = (*voteInfoWrapper)(nil) + +func (v voteInfoWrapper) GetBlockIDFlag() comet.BlockIDFlag { + return comet.BlockIDFlag(v.BlockIdFlag) +} + +func (v voteInfoWrapper) Validator() comet.Validator { + return validatorWrapper{v.VoteInfo.Validator} +} + +// validatorWrapper is a wrapper around abci.Validator that implements Validator interface +type validatorWrapper struct { + abci.Validator +} + +var _ comet.Validator = (*validatorWrapper)(nil) + +func (v validatorWrapper) Address() []byte { + return v.Validator.Address +} + +func (v validatorWrapper) Power() int64 { + return v.Validator.Power +} + +type misbehaviorWrapper struct { + abci.Misbehavior +} + +func (m misbehaviorWrapper) Type() comet.MisbehaviorType { + return comet.MisbehaviorType(m.Misbehavior.Type) +} + +func 
(m misbehaviorWrapper) Height() int64 { + return m.Misbehavior.Height +} + +func (m misbehaviorWrapper) Validator() comet.Validator { + return validatorWrapper{m.Misbehavior.Validator} +} + +func (m misbehaviorWrapper) Time() time.Time { + return m.Misbehavior.Time +} + +func (m misbehaviorWrapper) TotalVotingPower() int64 { + return m.Misbehavior.TotalVotingPower +} + +type prepareProposalInfo struct { + *abci.RequestPrepareProposal +} + +var _ comet.BlockInfo = (*prepareProposalInfo)(nil) + +func (r prepareProposalInfo) GetEvidence() comet.EvidenceList { + return evidenceWrapper{r.Misbehavior} +} + +func (r prepareProposalInfo) GetValidatorsHash() []byte { + return r.NextValidatorsHash +} + +func (r prepareProposalInfo) GetProposerAddress() []byte { + return r.ProposerAddress +} + +func (r prepareProposalInfo) GetLastCommit() comet.CommitInfo { + return extendedCommitInfoWrapper{r.LocalLastCommit} +} + +var _ comet.BlockInfo = (*prepareProposalInfo)(nil) + +type extendedCommitInfoWrapper struct { + abci.ExtendedCommitInfo +} + +var _ comet.CommitInfo = (*extendedCommitInfoWrapper)(nil) + +func (e extendedCommitInfoWrapper) Round() int32 { + return e.ExtendedCommitInfo.Round +} + +func (e extendedCommitInfoWrapper) Votes() comet.VoteInfos { + return extendedVoteInfoWrapperList{e.ExtendedCommitInfo.Votes} +} + +type extendedVoteInfoWrapperList struct { + votes []abci.ExtendedVoteInfo +} + +var _ comet.VoteInfos = (*extendedVoteInfoWrapperList)(nil) + +func (e extendedVoteInfoWrapperList) Len() int { + return len(e.votes) +} + +func (e extendedVoteInfoWrapperList) Get(i int) comet.VoteInfo { + return extendedVoteInfoWrapper{e.votes[i]} +} + +type extendedVoteInfoWrapper struct { + abci.ExtendedVoteInfo +} + +var _ comet.VoteInfo = (*extendedVoteInfoWrapper)(nil) + +func (e extendedVoteInfoWrapper) GetBlockIDFlag() comet.BlockIDFlag { + return comet.BlockIDFlag(e.BlockIdFlag) +} + +func (e extendedVoteInfoWrapper) Validator() comet.Validator { + return 
validatorWrapper{e.ExtendedVoteInfo.Validator} +} diff --git a/baseapp/internal/protocompat/protocompat.go b/baseapp/internal/protocompat/protocompat.go new file mode 100644 index 0000000..5543ac8 --- /dev/null +++ b/baseapp/internal/protocompat/protocompat.go @@ -0,0 +1,232 @@ +package protocompat + +import ( + "context" + "fmt" + "reflect" + + gogoproto "github.com/cosmos/gogoproto/proto" + "github.com/golang/protobuf/proto" // nolint: staticcheck // needed because gogoproto.Merge does not work consistently. See NOTE: comments. + "google.golang.org/grpc" + proto2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + + "github.com/cosmos/cosmos-sdk/codec" +) + +var ( + gogoType = reflect.TypeOf((*gogoproto.Message)(nil)).Elem() + protov2Type = reflect.TypeOf((*proto2.Message)(nil)).Elem() + protov2MarshalOpts = proto2.MarshalOptions{Deterministic: true} +) + +type Handler = func(ctx context.Context, request, response protoiface.MessageV1) error + +func MakeHybridHandler(cdc codec.BinaryCodec, sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) (Handler, error) { + methodFullName := protoreflect.FullName(fmt.Sprintf("%s.%s", sd.ServiceName, method.MethodName)) + desc, err := gogoproto.HybridResolver.FindDescriptorByName(methodFullName) + if err != nil { + return nil, err + } + methodDesc, ok := desc.(protoreflect.MethodDescriptor) + if !ok { + return nil, fmt.Errorf("invalid method descriptor %s", methodFullName) + } + + isProtov2Handler, err := isProtov2(method) + if err != nil { + return nil, err + } + if isProtov2Handler { + return makeProtoV2HybridHandler(methodDesc, cdc, method, handler) + } + return makeGogoHybridHandler(methodDesc, cdc, method, handler) +} + +// makeProtoV2HybridHandler returns a handler that can handle both gogo and protov2 messages. 
+func makeProtoV2HybridHandler(prefMethod protoreflect.MethodDescriptor, cdc codec.BinaryCodec, method grpc.MethodDesc, handler any) (Handler, error) { + // it's a protov2 handler, if a gogo counterparty is not found we cannot handle gogo messages. + gogoExists := gogoproto.MessageType(string(prefMethod.Output().FullName())) != nil + if !gogoExists { + return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { + protov2Request, ok := inReq.(proto2.Message) + if !ok { + return fmt.Errorf("invalid request type %T, method %s does not accept gogoproto messages", inReq, prefMethod.FullName()) + } + resp, err := method.Handler(handler, ctx, func(msg any) error { + proto2.Merge(msg.(proto2.Message), protov2Request) + return nil + }, nil) + if err != nil { + return err + } + // merge on the resp + proto2.Merge(outResp.(proto2.Message), resp.(proto2.Message)) + return nil + }, nil + } + return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { + // we check if the request is a protov2 message. + switch m := inReq.(type) { + case proto2.Message: + // we can just call the handler after making a copy of the message, for safety reasons. + resp, err := method.Handler(handler, ctx, func(msg any) error { + proto2.Merge(msg.(proto2.Message), m) + return nil + }, nil) + if err != nil { + return err + } + // merge on the resp + proto2.Merge(outResp.(proto2.Message), resp.(proto2.Message)) + return nil + case gogoproto.Message: + // we need to marshal and unmarshal the request. + requestBytes, err := cdc.Marshal(m) + if err != nil { + return err + } + resp, err := method.Handler(handler, ctx, func(msg any) error { + // unmarshal request into the message. + return proto2.Unmarshal(requestBytes, msg.(proto2.Message)) + }, nil) + if err != nil { + return err + } + // the response is a protov2 message, so we cannot just return it. + // since the request came as gogoproto, we expect the response + // to also be gogoproto. 
+ respBytes, err := protov2MarshalOpts.Marshal(resp.(proto2.Message)) + if err != nil { + return err + } + + // unmarshal response into a gogo message. + return cdc.Unmarshal(respBytes, outResp.(gogoproto.Message)) + default: + panic("unreachable") + } + }, nil +} + +func makeGogoHybridHandler(prefMethod protoreflect.MethodDescriptor, cdc codec.BinaryCodec, method grpc.MethodDesc, handler any) (Handler, error) { + // it's a gogo handler, we check if the existing protov2 counterparty exists. + _, err := protoregistry.GlobalTypes.FindMessageByName(prefMethod.Output().FullName()) + if err != nil { + // this can only be a gogo message. + return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { + _, ok := inReq.(proto2.Message) + if ok { + return fmt.Errorf("invalid request type %T, method %s does not accept protov2 messages", inReq, prefMethod.FullName()) + } + resp, err := method.Handler(handler, ctx, func(msg any) error { + // merge! ref: https://github.com/cosmos/cosmos-sdk/issues/18003 + // NOTE: using gogoproto.Merge will fail for some reason unknown to me, but + // using proto.Merge with gogo messages seems to work fine. + proto.Merge(msg.(gogoproto.Message), inReq) + return nil + }, nil) + if err != nil { + return err + } + // merge resp, ref: https://github.com/cosmos/cosmos-sdk/issues/18003 + // NOTE: using gogoproto.Merge will fail for some reason unknown to me, but + // using proto.Merge with gogo messages seems to work fine. + proto.Merge(outResp.(gogoproto.Message), resp.(gogoproto.Message)) + return nil + }, nil + } + // this is a gogo handler, and we have a protov2 counterparty. + return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { + switch m := inReq.(type) { + case proto2.Message: + // we need to marshal and unmarshal the request. 
+ requestBytes, err := protov2MarshalOpts.Marshal(m) + if err != nil { + return err + } + resp, err := method.Handler(handler, ctx, func(msg any) error { + // unmarshal request into the message. + return cdc.Unmarshal(requestBytes, msg.(gogoproto.Message)) + }, nil) + if err != nil { + return err + } + // the response is a gogo message, so we cannot just return it. + // since the request came as protov2, we expect the response + // to also be protov2. + respBytes, err := cdc.Marshal(resp.(gogoproto.Message)) + if err != nil { + return err + } + // now we unmarshal back into a protov2 message. + return proto2.Unmarshal(respBytes, outResp.(proto2.Message)) + case gogoproto.Message: + // we can just call the handler after making a copy of the message, for safety reasons. + resp, err := method.Handler(handler, ctx, func(msg any) error { + // ref: https://github.com/cosmos/cosmos-sdk/issues/18003 + asGogoProto := msg.(gogoproto.Message) + // NOTE: using gogoproto.Merge will fail for some reason unknown to me, but + // using proto.Merge with gogo messages seems to work fine. + proto.Merge(asGogoProto, m) + return nil + }, nil) + if err != nil { + return err + } + // merge on the resp, ref: https://github.com/cosmos/cosmos-sdk/issues/18003 + // NOTE: using gogoproto.Merge will fail for some reason unknown to me, but + // using proto.Merge with gogo messages seems to work fine. + proto.Merge(outResp.(gogoproto.Message), resp.(gogoproto.Message)) + return nil + default: + panic("unreachable") + } + }, nil +} + +// isProtov2 returns true if the given method accepts protov2 messages. +// Returns false if it does not. +// It uses the decoder function passed to the method handler to determine +// the type. Since the decoder function is passed in by the concrete implementer the expected +// message where bytes are unmarshaled to, we can use that to determine the type. 
+func isProtov2(md grpc.MethodDesc) (isV2Type bool, err error) { + pullRequestType := func(msg any) error { + typ := reflect.TypeOf(msg) + switch { + case typ.Implements(protov2Type): + isV2Type = true + return nil + case typ.Implements(gogoType): + isV2Type = false + return nil + default: + err = fmt.Errorf("invalid request type %T, expected protov2 or gogo message", msg) + return nil + } + } + // doNotExecute is a dummy handler that stops the request execution. + doNotExecute := func(_ context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) { + return nil, nil + } + // we are allowed to pass in a nil context and nil request, since we are not actually executing the request. + // this is made possible by the doNotExecute function which immediately returns without calling other handlers. + _, _ = md.Handler(nil, nil, pullRequestType, doNotExecute) + return isV2Type, err +} + +// RequestFullNameFromMethodDesc returns the fully-qualified name of the request message of the provided service's method. 
+func RequestFullNameFromMethodDesc(sd *grpc.ServiceDesc, method grpc.MethodDesc) (protoreflect.FullName, error) { + methodFullName := protoreflect.FullName(fmt.Sprintf("%s.%s", sd.ServiceName, method.MethodName)) + desc, err := gogoproto.HybridResolver.FindDescriptorByName(methodFullName) + if err != nil { + return "", fmt.Errorf("cannot find method descriptor %s", methodFullName) + } + methodDesc, ok := desc.(protoreflect.MethodDescriptor) + if !ok { + return "", fmt.Errorf("invalid method descriptor %s", methodFullName) + } + return methodDesc.Input().FullName(), nil +} diff --git a/baseapp/msg_service_router.go b/baseapp/msg_service_router.go new file mode 100644 index 0000000..f39daa9 --- /dev/null +++ b/baseapp/msg_service_router.go @@ -0,0 +1,221 @@ +package baseapp + +import ( + "context" + "fmt" + + gogogrpc "github.com/cosmos/gogoproto/grpc" + "github.com/cosmos/gogoproto/proto" + "google.golang.org/grpc" + "google.golang.org/protobuf/runtime/protoiface" + + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/baseapp/internal/protocompat" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// MessageRouter ADR 031 request type routing +// https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-031-msg-service.md +type MessageRouter interface { + Handler(msg sdk.Msg) MsgServiceHandler + HandlerByTypeURL(typeURL string) MsgServiceHandler +} + +// MsgServiceRouter routes fully-qualified Msg service methods to their handler. +type MsgServiceRouter struct { + interfaceRegistry codectypes.InterfaceRegistry + routes map[string]MsgServiceHandler + hybridHandlers map[string]func(ctx context.Context, req, resp protoiface.MessageV1) error + circuitBreaker CircuitBreaker +} + +var _ gogogrpc.Server = &MsgServiceRouter{} + +// NewMsgServiceRouter creates a new MsgServiceRouter. 
+func NewMsgServiceRouter() *MsgServiceRouter { + return &MsgServiceRouter{ + routes: map[string]MsgServiceHandler{}, + hybridHandlers: map[string]func(ctx context.Context, req, resp protoiface.MessageV1) error{}, + } +} + +func (msr *MsgServiceRouter) SetCircuit(cb CircuitBreaker) { + msr.circuitBreaker = cb +} + +// MsgServiceHandler defines a function type which handles Msg service message. +type MsgServiceHandler = func(ctx sdk.Context, req sdk.Msg) (*sdk.Result, error) + +// Handler returns the MsgServiceHandler for a given msg or nil if not found. +func (msr *MsgServiceRouter) Handler(msg sdk.Msg) MsgServiceHandler { + return msr.routes[sdk.MsgTypeURL(msg)] +} + +// HandlerByTypeURL returns the MsgServiceHandler for a given query route path or nil +// if not found. +func (msr *MsgServiceRouter) HandlerByTypeURL(typeURL string) MsgServiceHandler { + return msr.routes[typeURL] +} + +// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC +// service description, handler is an object which implements that gRPC service. +// +// This function PANICs: +// - if it is called before the service `Msg`s have been registered using +// RegisterInterfaces, +// - or if a service is being registered twice. +func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler any) { + // Adds a top-level query handler based on the gRPC service name. 
+ for _, method := range sd.Methods { + err := msr.registerMsgServiceHandler(sd, method, handler) + if err != nil { + panic(err) + } + err = msr.registerHybridHandler(sd, method, handler) + if err != nil { + panic(err) + } + } +} + +func (msr *MsgServiceRouter) HybridHandlerByMsgName(msgName string) func(ctx context.Context, req, resp protoiface.MessageV1) error { + return msr.hybridHandlers[msgName] +} + +func (msr *MsgServiceRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error { + inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method) + if err != nil { + return err + } + cdc := codec.NewProtoCodec(msr.interfaceRegistry) + hybridHandler, err := protocompat.MakeHybridHandler(cdc, sd, method, handler) + if err != nil { + return err + } + // if circuit breaker is not nil, then we decorate the hybrid handler with the circuit breaker + if msr.circuitBreaker == nil { + msr.hybridHandlers[string(inputName)] = hybridHandler + return nil + } + // decorate the hybrid handler with the circuit breaker + circuitBreakerHybridHandler := func(ctx context.Context, req, resp protoiface.MessageV1) error { + messageName := codectypes.MsgTypeURL(req) + allowed, err := msr.circuitBreaker.IsAllowed(ctx, messageName) + if err != nil { + return err + } + if !allowed { + return fmt.Errorf("circuit breaker disallows execution of message %s", messageName) + } + return hybridHandler(ctx, req, resp) + } + msr.hybridHandlers[string(inputName)] = circuitBreakerHybridHandler + return nil +} + +func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler any) error { + fqMethod := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName) + methodHandler := method.Handler + + var requestTypeName string + + // NOTE: This is how we pull the concrete request type for each handler for registering in the InterfaceRegistry. 
+ // This approach is maybe a bit hacky, but less hacky than reflecting on the handler object itself. + // We use a no-op interceptor to avoid actually calling into the handler itself. + _, _ = methodHandler(nil, context.Background(), func(i any) error { + msg, ok := i.(sdk.Msg) + if !ok { + // We panic here because there is no other alternative and the app cannot be initialized correctly + // this should only happen if there is a problem with code generation in which case the app won't + // work correctly anyway. + panic(fmt.Errorf("unable to register service method %s: %T does not implement sdk.Msg", fqMethod, i)) + } + + requestTypeName = sdk.MsgTypeURL(msg) + return nil + }, noopInterceptor) + + // Check that the service Msg fully-qualified method name has already + // been registered (via RegisterInterfaces). If the user registers a + // service without registering according service Msg type, there might be + // some unexpected behavior down the road. Since we can't return an error + // (`Server.RegisterService` interface restriction) we panic (at startup). + reqType, err := msr.interfaceRegistry.Resolve(requestTypeName) + if err != nil || reqType == nil { + return fmt.Errorf( + "type_url %s has not been registered yet. "+ + "Before calling RegisterService, you must register all interfaces by calling the `RegisterInterfaces` "+ + "method on module.BasicManager. Each module should call `msgservice.RegisterMsgServiceDesc` inside its "+ + "`RegisterInterfaces` method with the `_Msg_serviceDesc` generated by proto-gen", + requestTypeName, + ) + } + + // Check that each service is only registered once. If a service is + // registered more than once, then we should error. Since we can't + // return an error (`Server.RegisterService` interface restriction) we + // panic (at startup). + _, found := msr.routes[requestTypeName] + if found { + return fmt.Errorf( + "msg service %s has already been registered. Please make sure to only register each service once. 
"+ + "This usually means that there are conflicting modules registering the same msg service", + fqMethod, + ) + } + + msr.routes[requestTypeName] = func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + ctx = ctx.WithEventManager(sdk.NewEventManager()) + interceptor := func(goCtx context.Context, _ any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) { + goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx) + return handler(goCtx, msg) + } + + if m, ok := msg.(sdk.HasValidateBasic); ok { + if err := m.ValidateBasic(); err != nil { + return nil, err + } + } + + if msr.circuitBreaker != nil { + msgURL := sdk.MsgTypeURL(msg) + isAllowed, err := msr.circuitBreaker.IsAllowed(ctx, msgURL) + if err != nil { + return nil, err + } + + if !isAllowed { + return nil, fmt.Errorf("circuit breaker disables execution of this message: %s", msgURL) + } + } + + // Call the method handler from the service description with the handler object. + // We don't do any decoding here because the decoding was already done. + res, err := methodHandler(handler, ctx, noopDecoder, interceptor) + if err != nil { + return nil, err + } + + resMsg, ok := res.(proto.Message) + if !ok { + return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "Expecting proto.Message, got %T", resMsg) + } + + return sdk.WrapServiceResult(ctx, resMsg, err) + } + return nil +} + +// SetInterfaceRegistry sets the interface registry for the router. 
+func (msr *MsgServiceRouter) SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) { + msr.interfaceRegistry = interfaceRegistry +} + +func noopDecoder(_ any) error { return nil } +func noopInterceptor(_ context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) { + return nil, nil +} diff --git a/baseapp/msg_service_router_test.go b/baseapp/msg_service_router_test.go new file mode 100644 index 0000000..3c8dc28 --- /dev/null +++ b/baseapp/msg_service_router_test.go @@ -0,0 +1,202 @@ +package baseapp_test + +import ( + "context" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/depinject" + "cosmossdk.io/log" + + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" +) + +func TestRegisterMsgService(t *testing.T) { + // Setup baseapp. + var ( + appBuilder *runtime.AppBuilder + registry codectypes.InterfaceRegistry + ) + err := depinject.Inject( + depinject.Configs( + makeMinimalConfig(), + depinject.Supply(log.NewTestLogger(t)), + ), &appBuilder, ®istry) + require.NoError(t, err) + app := appBuilder.Build(dbm.NewMemDB(), nil) + + require.Panics(t, func() { + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + }) + + // Register testdata Msg services, and rerun `RegisterMsgService`. + testdata.RegisterInterfaces(registry) + + require.NotPanics(t, func() { + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + }) +} + +func TestRegisterMsgServiceTwice(t *testing.T) { + // Setup baseapp. 
+ var ( + appBuilder *runtime.AppBuilder + registry codectypes.InterfaceRegistry + ) + err := depinject.Inject( + depinject.Configs( + makeMinimalConfig(), + depinject.Supply(log.NewTestLogger(t)), + ), &appBuilder, ®istry) + require.NoError(t, err) + db := dbm.NewMemDB() + app := appBuilder.Build(db, nil) + testdata.RegisterInterfaces(registry) + + // First time registering service shouldn't panic. + require.NotPanics(t, func() { + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + }) + + // Second time should panic. + require.Panics(t, func() { + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + }) +} + +func TestHybridHandlerByMsgName(t *testing.T) { + // Setup baseapp and router. + var ( + appBuilder *runtime.AppBuilder + registry codectypes.InterfaceRegistry + ) + err := depinject.Inject( + depinject.Configs( + makeMinimalConfig(), + depinject.Supply(log.NewTestLogger(t)), + ), &appBuilder, ®istry) + require.NoError(t, err) + db := dbm.NewMemDB() + app := appBuilder.Build(db, nil) + testdata.RegisterInterfaces(registry) + + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + + handler := app.MsgServiceRouter().HybridHandlerByMsgName("testpb.MsgCreateDog") + + require.NotNil(t, handler) + require.NoError(t, app.Init()) + ctx := app.NewContext(true) + resp := new(testdata.MsgCreateDogResponse) + err = handler(ctx, &testdata.MsgCreateDog{ + Dog: &testdata.Dog{Name: "Spot"}, + Owner: "me", + }, resp) + require.NoError(t, err) + require.Equal(t, resp.Name, "Spot") +} + +func TestMsgService(t *testing.T) { + priv, _, _ := testdata.KeyTestPubAddr() + + var ( + appBuilder *runtime.AppBuilder + cdc codec.Codec + interfaceRegistry codectypes.InterfaceRegistry + ) + err := depinject.Inject( + depinject.Configs( + makeMinimalConfig(), + depinject.Supply(log.NewNopLogger()), + ), &appBuilder, &cdc, &interfaceRegistry) + require.NoError(t, err) + app := 
appBuilder.Build(dbm.NewMemDB(), nil) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + defaultSignMode, err := authsigning.APISignModeToInternal(txConfig.SignModeHandler().DefaultMode()) + require.NoError(t, err) + + testdata.RegisterInterfaces(interfaceRegistry) + testdata.RegisterMsgServer( + app.MsgServiceRouter(), + testdata.MsgServerImpl{}, + ) + _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) + require.NoError(t, err) + + _, _, addr := testdata.KeyTestPubAddr() + msg := testdata.MsgCreateDog{ + Dog: &testdata.Dog{Name: "Spot"}, + Owner: addr.String(), + } + + txBuilder := txConfig.NewTxBuilder() + txBuilder.SetFeeAmount(testdata.NewTestFeeAmount()) + txBuilder.SetGasLimit(testdata.NewTestGasLimit()) + err = txBuilder.SetMsgs(&msg) + require.NoError(t, err) + + // First round: we gather all the signer infos. We use the "set empty + // signature" hack to do that. + sigV2 := signing.SignatureV2{ + PubKey: priv.PubKey(), + Data: &signing.SingleSignatureData{ + SignMode: defaultSignMode, + Signature: nil, + }, + Sequence: 0, + } + + err = txBuilder.SetSignatures(sigV2) + require.NoError(t, err) + + // Second round: all signer infos are set, so each signer can sign. 
+ signerData := authsigning.SignerData{ + ChainID: "test", + AccountNumber: 0, + Sequence: 0, + PubKey: priv.PubKey(), + } + sigV2, err = tx.SignWithPrivKey( + context.TODO(), defaultSignMode, signerData, + txBuilder, priv, txConfig, 0) + require.NoError(t, err) + err = txBuilder.SetSignatures(sigV2) + require.NoError(t, err) + + // Send the tx to the app + txBytes, err := txConfig.TxEncoder()(txBuilder.GetTx()) + require.NoError(t, err) + res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) + require.NoError(t, err) + require.Equal(t, abci.CodeTypeOK, res.TxResults[0].Code, "res=%+v", res) +} diff --git a/baseapp/noopgasmeter.go b/baseapp/noopgasmeter.go new file mode 100644 index 0000000..f304aa1 --- /dev/null +++ b/baseapp/noopgasmeter.go @@ -0,0 +1,17 @@ +package baseapp + +import storetypes "cosmossdk.io/store/types" + +type noopGasMeter struct{} + +var _ storetypes.GasMeter = noopGasMeter{} + +func (noopGasMeter) GasConsumed() storetypes.Gas { return 0 } +func (noopGasMeter) GasConsumedToLimit() storetypes.Gas { return 0 } +func (noopGasMeter) GasRemaining() storetypes.Gas { return 0 } +func (noopGasMeter) Limit() storetypes.Gas { return 0 } +func (noopGasMeter) ConsumeGas(storetypes.Gas, string) {} +func (noopGasMeter) RefundGas(storetypes.Gas, string) {} +func (noopGasMeter) IsPastLimit() bool { return false } +func (noopGasMeter) IsOutOfGas() bool { return false } +func (noopGasMeter) String() string { return "noopGasMeter" } diff --git a/baseapp/oe/optimistic_execution.go b/baseapp/oe/optimistic_execution.go new file mode 100644 index 0000000..6ba0c3c --- /dev/null +++ b/baseapp/oe/optimistic_execution.go @@ -0,0 +1,160 @@ +package oe + +import ( + "bytes" + "context" + "encoding/hex" + "math/rand" + "sync" + "time" + + abci "github.com/cometbft/cometbft/abci/types" + + "cosmossdk.io/log" +) + +// FinalizeBlockFunc is the function that is called by the OE to finalize the +// block. 
It is the same as the one in the ABCI app. +type FinalizeBlockFunc func(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) + +// OptimisticExecution is a struct that contains the OE context. It is used to +// run the FinalizeBlock function in a goroutine, and to abort it if needed. +type OptimisticExecution struct { + finalizeBlockFunc FinalizeBlockFunc // ABCI FinalizeBlock function with a context + logger log.Logger + + mtx sync.Mutex + stopCh chan struct{} + request *abci.RequestFinalizeBlock + response *abci.ResponseFinalizeBlock + err error + cancelFunc func() // cancel function for the context + initialized bool // A boolean value indicating whether the struct has been initialized + + // debugging/testing options + abortRate int // number from 0 to 100 that determines the percentage of OE that should be aborted +} + +// NewOptimisticExecution initializes the Optimistic Execution context but does not start it. +func NewOptimisticExecution(logger log.Logger, fn FinalizeBlockFunc, opts ...func(*OptimisticExecution)) *OptimisticExecution { + logger = logger.With(log.ModuleKey, "oe") + oe := &OptimisticExecution{logger: logger, finalizeBlockFunc: fn} + for _, opt := range opts { + opt(oe) + } + return oe +} + +// WithAbortRate sets the abort rate for the OE. The abort rate is a number from +// 0 to 100 that determines the percentage of OE that should be aborted. +// This is for testing purposes only and must not be used in production. +func WithAbortRate(rate int) func(*OptimisticExecution) { + return func(oe *OptimisticExecution) { + oe.abortRate = rate + } +} + +// Reset resets the OE context. Must be called whenever we want to invalidate +// the current OE. 
+func (oe *OptimisticExecution) Reset() { + oe.mtx.Lock() + defer oe.mtx.Unlock() + oe.request = nil + oe.response = nil + oe.err = nil + oe.initialized = false +} + +func (oe *OptimisticExecution) Enabled() bool { + return oe != nil +} + +// Initialized returns true if the OE was initialized, meaning that it contains +// a request and it was run or it is running. +func (oe *OptimisticExecution) Initialized() bool { + if oe == nil { + return false + } + oe.mtx.Lock() + defer oe.mtx.Unlock() + + return oe.initialized +} + +// Execute initializes the OE and starts it in a goroutine. +func (oe *OptimisticExecution) Execute(req *abci.RequestProcessProposal) { + oe.mtx.Lock() + defer oe.mtx.Unlock() + + oe.stopCh = make(chan struct{}) + oe.request = &abci.RequestFinalizeBlock{ + Txs: req.Txs, + DecidedLastCommit: req.ProposedLastCommit, + Misbehavior: req.Misbehavior, + Hash: req.Hash, + Height: req.Height, + Time: req.Time, + NextValidatorsHash: req.NextValidatorsHash, + ProposerAddress: req.ProposerAddress, + } + + oe.logger.Debug("OE started", "height", req.Height, "hash", hex.EncodeToString(req.Hash), "time", req.Time.String()) + ctx, cancel := context.WithCancel(context.Background()) + oe.cancelFunc = cancel + oe.initialized = true + + go func() { + start := time.Now() + resp, err := oe.finalizeBlockFunc(ctx, oe.request) + + oe.mtx.Lock() + + executionTime := time.Since(start) + oe.logger.Debug("OE finished", "duration", executionTime.String(), "height", oe.request.Height, "hash", hex.EncodeToString(oe.request.Hash)) + oe.response, oe.err = resp, err + + close(oe.stopCh) + oe.mtx.Unlock() + }() +} + +// AbortIfNeeded aborts the OE if the request hash is not the same as the one in +// the running OE. Returns true if the OE was aborted. 
+func (oe *OptimisticExecution) AbortIfNeeded(reqHash []byte) bool { + if oe == nil { + return false + } + + oe.mtx.Lock() + defer oe.mtx.Unlock() + + if !bytes.Equal(oe.request.Hash, reqHash) { + oe.logger.Error("OE aborted due to hash mismatch", "oe_hash", hex.EncodeToString(oe.request.Hash), "req_hash", hex.EncodeToString(reqHash), "oe_height", oe.request.Height, "req_height", oe.request.Height) + oe.cancelFunc() + return true + } else if oe.abortRate > 0 && rand.Intn(100) < oe.abortRate { + // this is for test purposes only, we can emulate a certain percentage of + // OE needed to be aborted. + oe.cancelFunc() + oe.logger.Error("OE aborted due to test abort rate") + return true + } + + return false +} + +// Abort aborts the OE unconditionally and waits for it to finish. +func (oe *OptimisticExecution) Abort() { + if oe == nil || oe.cancelFunc == nil { + return + } + + oe.cancelFunc() + <-oe.stopCh +} + +// WaitResult waits for the OE to finish and returns the result. +func (oe *OptimisticExecution) WaitResult() (*abci.ResponseFinalizeBlock, error) { + <-oe.stopCh + return oe.response, oe.err +} diff --git a/baseapp/oe/optimistic_execution_test.go b/baseapp/oe/optimistic_execution_test.go new file mode 100644 index 0000000..d8a3a83 --- /dev/null +++ b/baseapp/oe/optimistic_execution_test.go @@ -0,0 +1,34 @@ +package oe + +import ( + "context" + "errors" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/stretchr/testify/assert" + + "cosmossdk.io/log" +) + +func testFinalizeBlock(_ context.Context, _ *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { + return nil, errors.New("test error") +} + +func TestOptimisticExecution(t *testing.T) { + oe := NewOptimisticExecution(log.NewNopLogger(), testFinalizeBlock) + assert.True(t, oe.Enabled()) + oe.Execute(&abci.RequestProcessProposal{ + Hash: []byte("test"), + }) + assert.True(t, oe.Initialized()) + + resp, err := oe.WaitResult() + assert.Nil(t, resp) + assert.EqualError(t, 
err, "test error") + + assert.False(t, oe.AbortIfNeeded([]byte("test"))) + assert.True(t, oe.AbortIfNeeded([]byte("wrong_hash"))) + + oe.Reset() +} diff --git a/baseapp/options.go b/baseapp/options.go new file mode 100644 index 0000000..359ce39 --- /dev/null +++ b/baseapp/options.go @@ -0,0 +1,405 @@ +package baseapp + +import ( + "fmt" + "io" + "math" + + dbm "github.com/cosmos/cosmos-db" + + "cosmossdk.io/store/metrics" + pruningtypes "cosmossdk.io/store/pruning/types" + "cosmossdk.io/store/snapshots" + snapshottypes "cosmossdk.io/store/snapshots/types" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp/oe" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/mempool" +) + +// File for storing in-package BaseApp optional functions, +// for options that need access to non-exported fields of the BaseApp + +// SetPruning sets a pruning option on the multistore associated with the app +func SetPruning(opts pruningtypes.PruningOptions) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.cms.SetPruning(opts) } +} + +// SetMinGasPrices returns an option that sets the minimum gas prices on the app. +func SetMinGasPrices(gasPricesStr string) func(*BaseApp) { + gasPrices, err := sdk.ParseDecCoins(gasPricesStr) + if err != nil { + panic(fmt.Sprintf("invalid minimum gas prices: %v", err)) + } + + return func(bapp *BaseApp) { bapp.setMinGasPrices(gasPrices) } +} + +// SetQueryGasLimit returns an option that sets a gas limit for queries. +func SetQueryGasLimit(queryGasLimit uint64) func(*BaseApp) { + if queryGasLimit == 0 { + queryGasLimit = math.MaxUint64 + } + + return func(bapp *BaseApp) { bapp.queryGasLimit = queryGasLimit } +} + +// SetHaltHeight returns a BaseApp option function that sets the halt block height. 
+func SetHaltHeight(blockHeight uint64) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.setHaltHeight(blockHeight) } +} + +// SetHaltTime returns a BaseApp option function that sets the halt block time. +func SetHaltTime(haltTime uint64) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.setHaltTime(haltTime) } +} + +// SetMinRetainBlocks returns a BaseApp option function that sets the minimum +// block retention height value when determining which heights to prune during +// ABCI Commit. +func SetMinRetainBlocks(minRetainBlocks uint64) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.setMinRetainBlocks(minRetainBlocks) } +} + +// SetTrace will turn on or off trace flag +func SetTrace(trace bool) func(*BaseApp) { + return func(app *BaseApp) { app.setTrace(trace) } +} + +// SetIndexEvents provides a BaseApp option function that sets the events to index. +func SetIndexEvents(ie []string) func(*BaseApp) { + return func(app *BaseApp) { app.setIndexEvents(ie) } +} + +// SetIAVLCacheSize provides a BaseApp option function that sets the size of IAVL cache. +func SetIAVLCacheSize(size int) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.cms.SetIAVLCacheSize(size) } +} + +// SetIAVLDisableFastNode enables(false)/disables(true) fast node usage from the IAVL store. +func SetIAVLDisableFastNode(disable bool) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.cms.SetIAVLDisableFastNode(disable) } +} + +// SetIAVLSyncPruning set sync/async pruning in the IAVL store. Developers should rarely use this. +// This option was added to allow the `Prune` command to force synchronous pruning, which is needed to allow the +// command to wait before returning. +func SetIAVLSyncPruning(syncPruning bool) func(*BaseApp) { + return func(bapp *BaseApp) { bapp.cms.SetIAVLSyncPruning(syncPruning) } +} + +// SetInterBlockCache provides a BaseApp option function that sets the +// inter-block cache. 
+func SetInterBlockCache(cache storetypes.MultiStorePersistentCache) func(*BaseApp) {
+	return func(app *BaseApp) { app.setInterBlockCache(cache) }
+}
+
+// SetSnapshot sets the snapshot store.
+func SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) func(*BaseApp) {
+	return func(app *BaseApp) { app.SetSnapshot(snapshotStore, opts) }
+}
+
+// SetMempool sets the mempool on BaseApp.
+func SetMempool(mempool mempool.Mempool) func(*BaseApp) {
+	return func(app *BaseApp) { app.SetMempool(mempool) }
+}
+
+// SetChainID sets the chain ID in BaseApp.
+func SetChainID(chainID string) func(*BaseApp) {
+	return func(app *BaseApp) { app.chainID = chainID }
+}
+
+// SetStoreLoader allows customization of the rootMultiStore initialization.
+func SetStoreLoader(loader StoreLoader) func(*BaseApp) {
+	return func(app *BaseApp) { app.SetStoreLoader(loader) }
+}
+
+// SetOptimisticExecution enables optimistic execution.
+func SetOptimisticExecution(opts ...func(*oe.OptimisticExecution)) func(*BaseApp) {
+	return func(app *BaseApp) {
+		app.optimisticExec = oe.NewOptimisticExecution(app.logger, app.internalFinalizeBlock, opts...)
+	}
+}
+
+// DisableBlockGasMeter disables the block gas meter.
+func DisableBlockGasMeter() func(*BaseApp) {
+	return func(app *BaseApp) { app.SetDisableBlockGasMeter(true) }
+}
+
+// SetName sets the application's name. Panics if the app is already sealed.
+func (app *BaseApp) SetName(name string) {
+	if app.sealed {
+		panic("SetName() on sealed BaseApp")
+	}
+
+	app.name = name
+}
+
+// SetParamStore sets a parameter store on the BaseApp.
+func (app *BaseApp) SetParamStore(ps ParamStore) {
+	if app.sealed {
+		panic("SetParamStore() on sealed BaseApp")
+	}
+
+	app.paramStore = ps
+}
+
+// SetVersion sets the application's version string.
+func (app *BaseApp) SetVersion(v string) { + if app.sealed { + panic("SetVersion() on sealed BaseApp") + } + app.version = v +} + +// SetProtocolVersion sets the application's protocol version +func (app *BaseApp) SetProtocolVersion(v uint64) { + app.appVersion = v +} + +func (app *BaseApp) SetDB(db dbm.DB) { + if app.sealed { + panic("SetDB() on sealed BaseApp") + } + + app.db = db +} + +func (app *BaseApp) SetCMS(cms storetypes.CommitMultiStore) { + if app.sealed { + panic("SetCMS() on sealed BaseApp") + } + + app.cms = cms +} + +func (app *BaseApp) SetInitChainer(initChainer sdk.InitChainer) { + if app.sealed { + panic("SetInitChainer() on sealed BaseApp") + } + + app.initChainer = initChainer +} + +func (app *BaseApp) PreBlocker() sdk.PreBlocker { + return app.preBlocker +} + +func (app *BaseApp) SetPreBlocker(preBlocker sdk.PreBlocker) { + if app.sealed { + panic("SetPreBlocker() on sealed BaseApp") + } + + app.preBlocker = preBlocker +} + +func (app *BaseApp) SetBeginBlocker(beginBlocker sdk.BeginBlocker) { + if app.sealed { + panic("SetBeginBlocker() on sealed BaseApp") + } + + app.beginBlocker = beginBlocker +} + +func (app *BaseApp) SetEndBlocker(endBlocker sdk.EndBlocker) { + if app.sealed { + panic("SetEndBlocker() on sealed BaseApp") + } + + app.endBlocker = endBlocker +} + +func (app *BaseApp) SetPrepareCheckStater(prepareCheckStater sdk.PrepareCheckStater) { + if app.sealed { + panic("SetPrepareCheckStater() on sealed BaseApp") + } + + app.prepareCheckStater = prepareCheckStater +} + +func (app *BaseApp) SetPrecommiter(precommiter sdk.Precommiter) { + if app.sealed { + panic("SetPrecommiter() on sealed BaseApp") + } + + app.precommiter = precommiter +} + +func (app *BaseApp) SetAnteHandler(ah sdk.AnteHandler) { + if app.sealed { + panic("SetAnteHandler() on sealed BaseApp") + } + + app.anteHandler = ah +} + +func (app *BaseApp) SetPostHandler(ph sdk.PostHandler) { + if app.sealed { + panic("SetPostHandler() on sealed BaseApp") + } + + app.postHandler 
= ph +} + +func (app *BaseApp) SetAddrPeerFilter(pf sdk.PeerFilter) { + if app.sealed { + panic("SetAddrPeerFilter() on sealed BaseApp") + } + + app.addrPeerFilter = pf +} + +func (app *BaseApp) SetIDPeerFilter(pf sdk.PeerFilter) { + if app.sealed { + panic("SetIDPeerFilter() on sealed BaseApp") + } + + app.idPeerFilter = pf +} + +func (app *BaseApp) SetFauxMerkleMode() { + if app.sealed { + panic("SetFauxMerkleMode() on sealed BaseApp") + } + + app.fauxMerkleMode = true +} + +// SetNotSigverify during simulation testing, transaction signature verification needs to be ignored. +func (app *BaseApp) SetNotSigverifyTx() { + app.sigverifyTx = false +} + +// SetCommitMultiStoreTracer sets the store tracer on the BaseApp's underlying +// CommitMultiStore. +func (app *BaseApp) SetCommitMultiStoreTracer(w io.Writer) { + app.cms.SetTracer(w) +} + +// SetStoreLoader allows us to customize the rootMultiStore initialization. +func (app *BaseApp) SetStoreLoader(loader StoreLoader) { + if app.sealed { + panic("SetStoreLoader() on sealed BaseApp") + } + + app.storeLoader = loader +} + +// SetSnapshot sets the snapshot store and options. +func (app *BaseApp) SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) { + if app.sealed { + panic("SetSnapshot() on sealed BaseApp") + } + if snapshotStore == nil { + app.snapshotManager = nil + return + } + app.cms.SetSnapshotInterval(opts.Interval) + app.snapshotManager = snapshots.NewManager(snapshotStore, opts, app.cms, nil, app.logger) +} + +// SetInterfaceRegistry sets the InterfaceRegistry. +func (app *BaseApp) SetInterfaceRegistry(registry types.InterfaceRegistry) { + app.interfaceRegistry = registry + app.grpcQueryRouter.SetInterfaceRegistry(registry) + app.msgServiceRouter.SetInterfaceRegistry(registry) + app.cdc = codec.NewProtoCodec(registry) +} + +// SetTxDecoder sets the TxDecoder if it wasn't provided in the BaseApp constructor. 
+func (app *BaseApp) SetTxDecoder(txDecoder sdk.TxDecoder) { + app.txDecoder = txDecoder +} + +// SetTxEncoder sets the TxEncoder if it wasn't provided in the BaseApp constructor. +func (app *BaseApp) SetTxEncoder(txEncoder sdk.TxEncoder) { + app.txEncoder = txEncoder +} + +// SetQueryMultiStore set a alternative MultiStore implementation to support grpc query service. +// +// Ref: https://github.com/cosmos/cosmos-sdk/issues/13317 +func (app *BaseApp) SetQueryMultiStore(ms storetypes.MultiStore) { + app.qms = ms +} + +// SetMempool sets the mempool for the BaseApp and is required for the app to start up. +func (app *BaseApp) SetMempool(mempool mempool.Mempool) { + if app.sealed { + panic("SetMempool() on sealed BaseApp") + } + app.mempool = mempool +} + +// SetProcessProposal sets the process proposal function for the BaseApp. +func (app *BaseApp) SetProcessProposal(handler sdk.ProcessProposalHandler) { + if app.sealed { + panic("SetProcessProposal() on sealed BaseApp") + } + app.processProposal = handler +} + +// SetPrepareProposal sets the prepare proposal function for the BaseApp. +func (app *BaseApp) SetPrepareProposal(handler sdk.PrepareProposalHandler) { + if app.sealed { + panic("SetPrepareProposal() on sealed BaseApp") + } + + app.prepareProposal = handler +} + +// SetCheckTx sets the checkTx function for the BaseApp. +func (app *BaseApp) SetCheckTxHandler(handler sdk.CheckTxHandler) { + if app.sealed { + panic("SetCheckTxHandler() on sealed BaseApp") + } + + app.checkTxHandler = handler +} + +func (app *BaseApp) SetExtendVoteHandler(handler sdk.ExtendVoteHandler) { + if app.sealed { + panic("SetExtendVoteHandler() on sealed BaseApp") + } + + app.extendVote = handler +} + +func (app *BaseApp) SetVerifyVoteExtensionHandler(handler sdk.VerifyVoteExtensionHandler) { + if app.sealed { + panic("SetVerifyVoteExtensionHandler() on sealed BaseApp") + } + + app.verifyVoteExt = handler +} + +// SetStoreMetrics sets the prepare proposal function for the BaseApp. 
+func (app *BaseApp) SetStoreMetrics(gatherer metrics.StoreMetrics) { + if app.sealed { + panic("SetStoreMetrics() on sealed BaseApp") + } + + app.cms.SetMetrics(gatherer) +} + +// SetStreamingManager sets the streaming manager for the BaseApp. +func (app *BaseApp) SetStreamingManager(manager storetypes.StreamingManager) { + app.streamingManager = manager +} + +// SetDisableBlockGasMeter sets the disableBlockGasMeter flag for the BaseApp. +func (app *BaseApp) SetDisableBlockGasMeter(disableBlockGasMeter bool) { + app.disableBlockGasMeter = disableBlockGasMeter +} + +// SetMsgServiceRouter sets the MsgServiceRouter of a BaseApp. +func (app *BaseApp) SetMsgServiceRouter(msgServiceRouter *MsgServiceRouter) { + app.msgServiceRouter = msgServiceRouter +} + +// SetGRPCQueryRouter sets the GRPCQueryRouter of the BaseApp. +func (app *BaseApp) SetGRPCQueryRouter(grpcQueryRouter *GRPCQueryRouter) { + app.grpcQueryRouter = grpcQueryRouter +} diff --git a/baseapp/params.go b/baseapp/params.go new file mode 100644 index 0000000..c0154cb --- /dev/null +++ b/baseapp/params.go @@ -0,0 +1,15 @@ +package baseapp + +import ( + "context" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" +) + +// ParamStore defines the interface the parameter store used by the BaseApp must +// fulfill. +type ParamStore interface { + Get(ctx context.Context) (cmtproto.ConsensusParams, error) + Has(ctx context.Context) (bool, error) + Set(ctx context.Context, cp cmtproto.ConsensusParams) error +} diff --git a/baseapp/params_legacy.go b/baseapp/params_legacy.go new file mode 100644 index 0000000..761060a --- /dev/null +++ b/baseapp/params_legacy.go @@ -0,0 +1,150 @@ +/* +Deprecated. + +Legacy types are defined below to aid in the migration of CometBFT consensus +parameters from use of the now deprecated x/params modules to a new dedicated +x/consensus module. 
+ +Application developers should ensure that they implement their upgrade handler +correctly such that app.ConsensusParamsKeeper.Set() is called with the values +returned by GetConsensusParams(). + +Example: + + baseAppLegacySS := app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) + + app.UpgradeKeeper.SetUpgradeHandler( + UpgradeName, + func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + if cp := baseapp.GetConsensusParams(ctx, baseAppLegacySS); cp != nil { + app.ConsensusParamsKeeper.Set(ctx, cp) + } else { + ctx.Logger().Info("warning: consensus parameters are undefined; skipping migration", "upgrade", UpgradeName) + } + + return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) + }, + ) + +Developers can also bypass the use of the legacy Params subspace and set the +values to app.ConsensusParamsKeeper.Set() explicitly. + +Note, for new chains this is not necessary as CometBFT's consensus parameters +will automatically be set for you in InitChain. 
+*/
+package baseapp
+
+import (
+	"errors"
+	"fmt"
+
+	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+// Paramspace is the legacy x/params subspace name under which the consensus
+// parameters were stored.
+const Paramspace = "baseapp"
+
+// Legacy store keys for the consensus parameter groups in the x/params subspace.
+var (
+	ParamStoreKeyBlockParams     = []byte("BlockParams")
+	ParamStoreKeyEvidenceParams  = []byte("EvidenceParams")
+	ParamStoreKeyValidatorParams = []byte("ValidatorParams")
+)
+
+// LegacyParamStore is the read-only subset of the deprecated x/params subspace
+// needed to migrate consensus parameters out of it.
+type LegacyParamStore interface {
+	Get(ctx sdk.Context, key []byte, ptr any)
+	Has(ctx sdk.Context, key []byte) bool
+}
+
+// ValidateBlockParams asserts that i is a cmtproto.BlockParams with a positive
+// MaxBytes and a MaxGas of at least -1.
+func ValidateBlockParams(i any) error {
+	v, ok := i.(cmtproto.BlockParams)
+	if !ok {
+		return fmt.Errorf("invalid parameter type: %T", i)
+	}
+
+	if v.MaxBytes <= 0 {
+		return fmt.Errorf("block maximum bytes must be positive: %d", v.MaxBytes)
+	}
+
+	if v.MaxGas < -1 {
+		return fmt.Errorf("block maximum gas must be greater than or equal to -1: %d", v.MaxGas)
+	}
+
+	return nil
+}
+
+// ValidateEvidenceParams asserts that i is a cmtproto.EvidenceParams with
+// positive age limits and non-negative MaxBytes.
+func ValidateEvidenceParams(i any) error {
+	v, ok := i.(cmtproto.EvidenceParams)
+	if !ok {
+		return fmt.Errorf("invalid parameter type: %T", i)
+	}
+
+	if v.MaxAgeNumBlocks <= 0 {
+		return fmt.Errorf("evidence maximum age in blocks must be positive: %d", v.MaxAgeNumBlocks)
+	}
+
+	if v.MaxAgeDuration <= 0 {
+		return fmt.Errorf("evidence maximum age time duration must be positive: %v", v.MaxAgeDuration)
+	}
+
+	if v.MaxBytes < 0 {
+		return fmt.Errorf("maximum evidence bytes must be non-negative: %v", v.MaxBytes)
+	}
+
+	return nil
+}
+
+// ValidateValidatorParams asserts that i is a cmtproto.ValidatorParams with a
+// non-empty allowed pubkey type list.
+func ValidateValidatorParams(i any) error {
+	v, ok := i.(cmtproto.ValidatorParams)
+	if !ok {
+		return fmt.Errorf("invalid parameter type: %T", i)
+	}
+
+	if len(v.PubKeyTypes) == 0 {
+		return errors.New("validator allowed pubkey types must not be empty")
+	}
+
+	return nil
+}
+
+// GetConsensusParams reads the consensus parameters from the legacy x/params
+// subspace. It returns nil when paramStore is nil; parameter groups missing
+// from the store are left nil in the result.
+func GetConsensusParams(ctx sdk.Context, paramStore LegacyParamStore) *cmtproto.ConsensusParams {
+	if paramStore == nil {
+		return nil
+	}
+
+	cp := new(cmtproto.ConsensusParams)
+
+	if paramStore.Has(ctx, ParamStoreKeyBlockParams) {
+		var bp cmtproto.BlockParams
+
+		paramStore.Get(ctx, ParamStoreKeyBlockParams, &bp)
+		cp.Block = &bp
+	}
+
+	if paramStore.Has(ctx, ParamStoreKeyEvidenceParams) {
+		var ep cmtproto.EvidenceParams
+
+		paramStore.Get(ctx, ParamStoreKeyEvidenceParams, &ep)
+		cp.Evidence = &ep
+	}
+
+	if paramStore.Has(ctx, ParamStoreKeyValidatorParams) {
+		var vp cmtproto.ValidatorParams
+
+		paramStore.Get(ctx, ParamStoreKeyValidatorParams, &vp)
+		cp.Validator = &vp
+	}
+
+	return cp
+}
+
+// MigrateParams copies the consensus parameters from the legacy x/params
+// subspace lps into the dedicated param store ps. When no legacy parameters
+// exist the migration is skipped with an informational log, not an error.
+func MigrateParams(ctx sdk.Context, lps LegacyParamStore, ps ParamStore) error {
+	if cp := GetConsensusParams(ctx, lps); cp != nil {
+		if err := ps.Set(ctx, *cp); err != nil {
+			return err
+		}
+	} else {
+		ctx.Logger().Info("warning: consensus parameters are undefined; skipping migration")
+	}
+	return nil
+}
diff --git a/baseapp/recovery.go b/baseapp/recovery.go
new file mode 100644
index 0000000..6ff4ac5
--- /dev/null
+++ b/baseapp/recovery.go
@@ -0,0 +1,80 @@
+package baseapp
+
+import (
+	"fmt"
+	"runtime/debug"
+
+	errorsmod "cosmossdk.io/errors"
+	storetypes "cosmossdk.io/store/types"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
+)
+
+// RecoveryHandler handles recovery() object.
+// Return a non-nil error if recoveryObj was processed.
+// Return nil if recoveryObj was not processed.
+type RecoveryHandler func(recoveryObj any) error
+
+// recoveryMiddleware is wrapper for RecoveryHandler to create chained recovery handling.
+// returns (recoveryMiddleware, nil) if recoveryObj was not processed and should be passed to the next middleware in chain.
+// returns (nil, error) if recoveryObj was processed and middleware chain processing should be stopped.
+type recoveryMiddleware func(recoveryObj any) (recoveryMiddleware, error)
+
+// processRecovery processes recoveryMiddleware chain for recovery() object.
+// Chain processing stops on non-nil error or when chain is processed.
+func processRecovery(recoveryObj any, middleware recoveryMiddleware) error {
+	// A nil middleware marks the end of the chain: no handler claimed the object.
+	if middleware == nil {
+		return nil
+	}
+
+	next, err := middleware(recoveryObj)
+	if err != nil {
+		// Handled: stop the chain and surface the error.
+		return err
+	}
+
+	// Not handled by this layer; recurse into the next one. Depth is bounded by
+	// the length of the middleware chain built via newRecoveryMiddleware.
+	return processRecovery(recoveryObj, next)
+}
+
+// newRecoveryMiddleware creates a RecoveryHandler middleware.
+// The returned middleware stops the chain (returns an error) when handler
+// processes recoveryObj, and otherwise passes control to next.
+func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware) recoveryMiddleware {
+	return func(recoveryObj any) (recoveryMiddleware, error) {
+		if err := handler(recoveryObj); err != nil {
+			return nil, err
+		}
+
+		return next, nil
+	}
+}
+
+// newOutOfGasRecoveryMiddleware creates a standard OutOfGas recovery middleware for app.runTx method.
+// It only processes storetypes.ErrorOutOfGas panics, wrapping them into
+// sdkerrors.ErrOutOfGas with the gas-wanted/gas-used figures; any other
+// recovery object falls through to next.
+func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recoveryMiddleware) recoveryMiddleware {
+	handler := func(recoveryObj any) error {
+		err, ok := recoveryObj.(storetypes.ErrorOutOfGas)
+		if !ok {
+			// Not an out-of-gas panic; let a later middleware deal with it.
+			return nil
+		}
+
+		return errorsmod.Wrap(
+			sdkerrors.ErrOutOfGas, fmt.Sprintf(
+				"out of gas in location: %v; gasWanted: %d, gasUsed: %d",
+				err.Descriptor, gasWanted, ctx.GasMeter().GasConsumed(),
+			),
+		)
+	}
+
+	return newRecoveryMiddleware(handler, next)
+}
+
+// newDefaultRecoveryMiddleware creates a default (last in chain) recovery middleware for app.runTx method.
+func newDefaultRecoveryMiddleware() recoveryMiddleware { + handler := func(recoveryObj any) error { + return errorsmod.Wrap( + sdkerrors.ErrPanic, fmt.Sprintf( + "recovered: %v\nstack:\n%v", recoveryObj, string(debug.Stack()), + ), + ) + } + + return newRecoveryMiddleware(handler, nil) +} diff --git a/baseapp/recovery_test.go b/baseapp/recovery_test.go new file mode 100644 index 0000000..99b89b1 --- /dev/null +++ b/baseapp/recovery_test.go @@ -0,0 +1,64 @@ +package baseapp + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +// Test that recovery chain produces expected error at specific middleware layer +func TestRecoveryChain(t *testing.T) { + createError := func(id int) error { + return fmt.Errorf("error from id: %d", id) + } + + createHandler := func(id int, handle bool) RecoveryHandler { + return func(_ any) error { + if handle { + return createError(id) + } + return nil + } + } + + // check recovery chain [1] -> 2 -> 3 + { + mw := newRecoveryMiddleware(createHandler(3, false), nil) + mw = newRecoveryMiddleware(createHandler(2, false), mw) + mw = newRecoveryMiddleware(createHandler(1, true), mw) + receivedErr := processRecovery(nil, mw) + + require.Equal(t, createError(1), receivedErr) + } + + // check recovery chain 1 -> [2] -> 3 + { + mw := newRecoveryMiddleware(createHandler(3, false), nil) + mw = newRecoveryMiddleware(createHandler(2, true), mw) + mw = newRecoveryMiddleware(createHandler(1, false), mw) + receivedErr := processRecovery(nil, mw) + + require.Equal(t, createError(2), receivedErr) + } + + // check recovery chain 1 -> 2 -> [3] + { + mw := newRecoveryMiddleware(createHandler(3, true), nil) + mw = newRecoveryMiddleware(createHandler(2, false), mw) + mw = newRecoveryMiddleware(createHandler(1, false), mw) + receivedErr := processRecovery(nil, mw) + + require.Equal(t, createError(3), receivedErr) + } + + // check recovery chain 1 -> 2 -> 3 + { + mw := newRecoveryMiddleware(createHandler(3, false), nil) + mw = 
newRecoveryMiddleware(createHandler(2, false), mw) + mw = newRecoveryMiddleware(createHandler(1, false), mw) + receivedErr := processRecovery(nil, mw) + + require.Nil(t, receivedErr) + } +} diff --git a/baseapp/regression_test.go b/baseapp/regression_test.go new file mode 100644 index 0000000..4c2faa2 --- /dev/null +++ b/baseapp/regression_test.go @@ -0,0 +1,41 @@ +package baseapp + +import ( + "testing" + + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + "cosmossdk.io/log" + "cosmossdk.io/store" + storemetrics "cosmossdk.io/store/metrics" +) + +// Ensures that error checks are performed before sealing the app. +// Please see https://github.com/cosmos/cosmos-sdk/issues/18726 +func TestNilCmsCheckBeforeSeal(t *testing.T) { + app := new(BaseApp) + + // 1. Invoking app.Init with a nil cms MUST not seal the app + // and should return an error firstly, which can later be reversed. + for range 10 { // N times, the app shouldn't be sealed. + err := app.Init() + require.Error(t, err) + require.Contains(t, err.Error(), "commit multi-store must not be nil") + require.False(t, app.IsSealed(), "the app MUST not be sealed") + } + + // 2. Now that we've figured out and gotten back an error, let's rectify the problem. + // and we should be able to set the commit multistore then reinvoke app.Init successfully! + db := dbm.NewMemDB() + logger := log.NewTestLogger(t) + app.cms = store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()) + err := app.Init() + require.Nil(t, err, "app.Init MUST now succeed") + require.True(t, app.IsSealed(), "the app must now be sealed") + + // 3. Now we should expect a panic because the app is sealed. 
+ require.Panics(t, func() { + _ = app.Init() + }) +} diff --git a/baseapp/snapshot_test.go b/baseapp/snapshot_test.go new file mode 100644 index 0000000..2f9c572 --- /dev/null +++ b/baseapp/snapshot_test.go @@ -0,0 +1,346 @@ +package baseapp_test + +import ( + "context" + "fmt" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/stretchr/testify/require" + + pruningtypes "cosmossdk.io/store/pruning/types" + snapshottypes "cosmossdk.io/store/snapshots/types" +) + +func TestABCI_ListSnapshots(t *testing.T) { + ssCfg := SnapshotsConfig{ + blocks: 5, + blockTxs: 4, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + + suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) + + resp, err := suite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) + require.NoError(t, err) + for _, s := range resp.Snapshots { + require.NotEmpty(t, s.Hash) + require.NotEmpty(t, s.Metadata) + + s.Hash = nil + s.Metadata = nil + } + + require.Equal(t, &abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ + {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, + {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, + }}, resp) +} + +func TestABCI_SnapshotWithPruning(t *testing.T) { + testCases := map[string]struct { + ssCfg SnapshotsConfig + expectedSnapshots []*abci.Snapshot + }{ + "prune nothing with snapshot": { + ssCfg: SnapshotsConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, + }, + }, + "prune everything with snapshot": { + ssCfg: SnapshotsConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 20, 
Format: snapshottypes.CurrentFormat, Chunks: 5}, + }, + }, + "default pruning with snapshot": { + ssCfg: SnapshotsConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, + }, + }, + "custom": { + ssCfg: SnapshotsConfig{ + blocks: 25, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewCustomPruningOptions(12, 12), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 25, Format: snapshottypes.CurrentFormat, Chunks: 6}, + {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, + }, + }, + "no snapshots": { + ssCfg: SnapshotsConfig{ + blocks: 10, + blockTxs: 2, + snapshotInterval: 0, // 0 implies disable snapshots + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{}, + }, + "keep all snapshots": { + ssCfg: SnapshotsConfig{ + blocks: 10, + blockTxs: 2, + snapshotInterval: 3, + snapshotKeepRecent: 0, // 0 implies keep all snapshots + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 9, Format: snapshottypes.CurrentFormat, Chunks: 2}, + {Height: 6, Format: snapshottypes.CurrentFormat, Chunks: 2}, + {Height: 3, Format: snapshottypes.CurrentFormat, Chunks: 1}, + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + suite := NewBaseAppSuiteWithSnapshots(t, tc.ssCfg) + + resp, err := suite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) + require.NoError(t, err) + for _, s := range resp.Snapshots { + require.NotEmpty(t, s.Hash) + require.NotEmpty(t, s.Metadata) + + s.Hash = nil + s.Metadata = nil + } + + require.Equal(t, &abci.ResponseListSnapshots{Snapshots: tc.expectedSnapshots}, resp) + + // Validate that heights were pruned 
correctly by querying the state at the last height that should be present relative to latest + // and the first height that should be pruned. + // + // Exceptions: + // * Prune nothing: should be able to query all heights (we only test first and latest) + // * Prune default: should be able to query all heights (we only test first and latest) + // * The reason for default behaving this way is that we only commit 20 heights but default has 100_000 keep-recent + var lastExistingHeight int64 + if tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { + lastExistingHeight = 1 + } else { + // Integer division rounds down so by multiplying back we get the last height at which we pruned + lastExistingHeight = int64((tc.ssCfg.blocks/tc.ssCfg.pruningOpts.Interval)*tc.ssCfg.pruningOpts.Interval - tc.ssCfg.pruningOpts.KeepRecent) + } + + // Query 1 + res, err := suite.baseApp.Query(context.TODO(), &abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight}) + require.NoError(t, err) + require.NotNil(t, res, "height: %d", lastExistingHeight) + require.NotNil(t, res.Value, "height: %d", lastExistingHeight) + + // Query 2 + res, err = suite.baseApp.Query(context.TODO(), &abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight - 1}) + require.NoError(t, err) + require.NotNil(t, res, "height: %d", lastExistingHeight-1) + + if tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { + // With prune nothing or default, we query height 0 which translates to the latest height. 
+ require.NotNil(t, res.Value, "height: %d", lastExistingHeight-1) + } + }) + } +} + +func TestABCI_LoadSnapshotChunk(t *testing.T) { + ssCfg := SnapshotsConfig{ + blocks: 2, + blockTxs: 5, + snapshotInterval: 2, + snapshotKeepRecent: snapshottypes.CurrentFormat, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) + + testCases := map[string]struct { + height uint64 + format uint32 + chunk uint32 + expectEmpty bool + }{ + "Existing snapshot": {2, snapshottypes.CurrentFormat, 1, false}, + "Missing height": {100, snapshottypes.CurrentFormat, 1, true}, + "Missing format": {2, snapshottypes.CurrentFormat + 1, 1, true}, + "Missing chunk": {2, snapshottypes.CurrentFormat, 9, true}, + "Zero height": {0, snapshottypes.CurrentFormat, 1, true}, + "Zero format": {2, 0, 1, true}, + "Zero chunk": {2, snapshottypes.CurrentFormat, 0, false}, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + resp, _ := suite.baseApp.LoadSnapshotChunk(&abci.RequestLoadSnapshotChunk{ + Height: tc.height, + Format: tc.format, + Chunk: tc.chunk, + }) + if tc.expectEmpty { + require.Equal(t, &abci.ResponseLoadSnapshotChunk{}, resp) + return + } + + require.NotEmpty(t, resp.Chunk) + }) + } +} + +func TestABCI_OfferSnapshot_Errors(t *testing.T) { + ssCfg := SnapshotsConfig{ + blocks: 0, + blockTxs: 0, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) + + m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} + metadata, err := m.Marshal() + require.NoError(t, err) + + hash := []byte{1, 2, 3} + + testCases := map[string]struct { + snapshot *abci.Snapshot + result abci.ResponseOfferSnapshot_Result + }{ + "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, + "invalid format": {&abci.Snapshot{ + Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, + 
}, abci.ResponseOfferSnapshot_REJECT_FORMAT}, + "incorrect chunk count": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 2, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "no chunks": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "invalid metadata serialization": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, + }, abci.ResponseOfferSnapshot_REJECT}, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + resp, err := suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) + require.NoError(t, err) + require.Equal(t, tc.result, resp.Result) + }) + } + + // Offering a snapshot after one has been accepted should error + resp, err := suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 1, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.NoError(t, err) + require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) + + resp, err = suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 2, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.NoError(t, err) + require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) +} + +func TestABCI_ApplySnapshotChunk(t *testing.T) { + srcCfg := SnapshotsConfig{ + blocks: 4, + blockTxs: 10, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + srcSuite := NewBaseAppSuiteWithSnapshots(t, srcCfg) + + targetCfg := SnapshotsConfig{ + blocks: 0, + blockTxs: 0, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: 
pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + targetSuite := NewBaseAppSuiteWithSnapshots(t, targetCfg) + + // fetch latest snapshot to restore + respList, err := srcSuite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) + require.NoError(t, err) + require.NotEmpty(t, respList.Snapshots) + snapshot := respList.Snapshots[0] + + // make sure the snapshot has at least 3 chunks + require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") + + // begin a snapshot restoration in the target + respOffer, err := targetSuite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: snapshot}) + require.NoError(t, err) + require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) + + // We should be able to pass an invalid chunk and get a verify failure, before + // reapplying it. + respApply, err := targetSuite.baseApp.ApplySnapshotChunk(&abci.RequestApplySnapshotChunk{ + Index: 0, + Chunk: []byte{9}, + Sender: "sender", + }) + require.NoError(t, err) + require.Equal(t, &abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_RETRY, + RefetchChunks: []uint32{0}, + RejectSenders: []string{"sender"}, + }, respApply) + + // fetch each chunk from the source and apply it to the target + for index := uint32(0); index < snapshot.Chunks; index++ { + respChunk, err := srcSuite.baseApp.LoadSnapshotChunk(&abci.RequestLoadSnapshotChunk{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunk: index, + }) + require.NoError(t, err) + require.NotNil(t, respChunk.Chunk) + + respApply, err := targetSuite.baseApp.ApplySnapshotChunk(&abci.RequestApplySnapshotChunk{ + Index: index, + Chunk: respChunk.Chunk, + }) + require.NoError(t, err) + require.Equal(t, &abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_ACCEPT, + }, respApply) + } + + // the target should now have the same hash as the source + require.Equal(t, srcSuite.baseApp.LastCommitID(), 
targetSuite.baseApp.LastCommitID()) +} diff --git a/baseapp/state.go b/baseapp/state.go new file mode 100644 index 0000000..31e848c --- /dev/null +++ b/baseapp/state.go @@ -0,0 +1,36 @@ +package baseapp + +import ( + "sync" + + storetypes "cosmossdk.io/store/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type state struct { + ms storetypes.CacheMultiStore + + mtx sync.RWMutex + ctx sdk.Context +} + +// CacheMultiStore calls and returns a CacheMultiStore on the state's underling +// CacheMultiStore. +func (st *state) CacheMultiStore() storetypes.CacheMultiStore { + return st.ms.CacheMultiStore() +} + +// SetContext updates the state's context to the context provided. +func (st *state) SetContext(ctx sdk.Context) { + st.mtx.Lock() + defer st.mtx.Unlock() + st.ctx = ctx +} + +// Context returns the Context of the state. +func (st *state) Context() sdk.Context { + st.mtx.RLock() + defer st.mtx.RUnlock() + return st.ctx +} diff --git a/baseapp/streaming.go b/baseapp/streaming.go new file mode 100644 index 0000000..5c2ddfc --- /dev/null +++ b/baseapp/streaming.go @@ -0,0 +1,108 @@ +package baseapp + +import ( + "fmt" + "slices" + "sort" + "strings" + + "github.com/spf13/cast" + + "cosmossdk.io/store/streaming" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/client/flags" + servertypes "github.com/cosmos/cosmos-sdk/server/types" +) + +const ( + StreamingTomlKey = "streaming" + StreamingABCITomlKey = "abci" + StreamingABCIPluginTomlKey = "plugin" + StreamingABCIKeysTomlKey = "keys" + StreamingABCIStopNodeOnErrTomlKey = "stop-node-on-err" +) + +// RegisterStreamingServices registers streaming services with the BaseApp. 
+func (app *BaseApp) RegisterStreamingServices(appOpts servertypes.AppOptions, keys map[string]*storetypes.KVStoreKey) error { + // register streaming services + streamingCfg := cast.ToStringMap(appOpts.Get(StreamingTomlKey)) + for service := range streamingCfg { + pluginKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, service, StreamingABCIPluginTomlKey) + pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey))) + if len(pluginName) > 0 { + logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel)) + plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel) + if err != nil { + return fmt.Errorf("failed to load streaming plugin: %w", err) + } + if err := app.registerStreamingPlugin(appOpts, keys, plugin); err != nil { + return fmt.Errorf("failed to register streaming plugin %w", err) + } + } + } + + return nil +} + +// registerStreamingPlugin registers streaming plugins with the BaseApp. +func (app *BaseApp) registerStreamingPlugin( + appOpts servertypes.AppOptions, + keys map[string]*storetypes.KVStoreKey, + streamingPlugin any, +) error { + v, ok := streamingPlugin.(storetypes.ABCIListener) + if !ok { + return fmt.Errorf("unexpected plugin type %T", v) + } + + app.registerABCIListenerPlugin(appOpts, keys, v) + return nil +} + +// registerABCIListenerPlugin registers plugins that implement the ABCIListener interface. 
+func (app *BaseApp) registerABCIListenerPlugin( + appOpts servertypes.AppOptions, + keys map[string]*storetypes.KVStoreKey, + abciListener storetypes.ABCIListener, +) { + stopNodeOnErrKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIStopNodeOnErrTomlKey) + stopNodeOnErr := cast.ToBool(appOpts.Get(stopNodeOnErrKey)) + keysKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIKeysTomlKey) + exposeKeysStr := cast.ToStringSlice(appOpts.Get(keysKey)) + exposedKeys := exposeStoreKeysSorted(exposeKeysStr, keys) + app.cms.AddListeners(exposedKeys) + app.SetStreamingManager( + storetypes.StreamingManager{ + ABCIListeners: []storetypes.ABCIListener{abciListener}, + StopNodeOnErr: stopNodeOnErr, + }, + ) +} + +func exposeAll(list []string) bool { + return slices.Contains(list, "*") +} + +func exposeStoreKeysSorted(keysStr []string, keys map[string]*storetypes.KVStoreKey) []storetypes.StoreKey { + var exposeStoreKeys []storetypes.StoreKey + if exposeAll(keysStr) { + exposeStoreKeys = make([]storetypes.StoreKey, 0, len(keys)) + for key := range keys { + exposeStoreKeys = append(exposeStoreKeys, keys[key]) + } + } else { + exposeStoreKeys = make([]storetypes.StoreKey, 0, len(keysStr)) + for _, keyStr := range keysStr { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } + // sort storeKeys for deterministic output + sort.SliceStable(exposeStoreKeys, func(i, j int) bool { + return exposeStoreKeys[i].Name() < exposeStoreKeys[j].Name() + }) + + return exposeStoreKeys +} diff --git a/baseapp/streaming_test.go b/baseapp/streaming_test.go new file mode 100644 index 0000000..36e60fc --- /dev/null +++ b/baseapp/streaming_test.go @@ -0,0 +1,151 @@ +package baseapp_test + +import ( + "context" + "fmt" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + tmproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/stretchr/testify/require" + + 
storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" +) + +var _ storetypes.ABCIListener = (*MockABCIListener)(nil) + +type MockABCIListener struct { + name string + ChangeSet []*storetypes.StoreKVPair +} + +func NewMockABCIListener(name string) MockABCIListener { + return MockABCIListener{ + name: name, + ChangeSet: make([]*storetypes.StoreKVPair, 0), + } +} + +func (m MockABCIListener) ListenFinalizeBlock(_ context.Context, _ abci.RequestFinalizeBlock, _ abci.ResponseFinalizeBlock) error { + return nil +} + +func (m *MockABCIListener) ListenCommit(_ context.Context, _ abci.ResponseCommit, cs []*storetypes.StoreKVPair) error { + m.ChangeSet = cs + return nil +} + +var distKey1 = storetypes.NewKVStoreKey("distKey1") + +func TestABCI_MultiListener_StateChanges(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + distOpt := func(bapp *baseapp.BaseApp) { bapp.MountStores(distKey1) } + mockListener1 := NewMockABCIListener("lis_1") + mockListener2 := NewMockABCIListener("lis_2") + streamingManager := storetypes.StreamingManager{ABCIListeners: []storetypes.ABCIListener{&mockListener1, &mockListener2}} + streamingManagerOpt := func(bapp *baseapp.BaseApp) { bapp.SetStreamingManager(streamingManager) } + addListenerOpt := func(bapp *baseapp.BaseApp) { bapp.CommitMultiStore().AddListeners([]storetypes.StoreKey{distKey1}) } + suite := NewBaseAppSuite(t, anteOpt, distOpt, streamingManagerOpt, addListenerOpt) + + _, err := suite.baseApp.InitChain( + &abci.RequestInitChain{ + ConsensusParams: &tmproto.ConsensusParams{}, + }, + ) + require.NoError(t, err) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := range nBlocks { + txs := 
[][]byte{} + + var expectedChangeSet []*storetypes.StoreKVPair + + // create final block context state + _, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1, Txs: txs}) + require.NoError(t, err) + + for i := range txPerHeight { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(t, suite.txConfig, counter, counter) + + txBytes, err := suite.txConfig.TxEncoder()(tx) + require.NoError(t, err) + + sKey := fmt.Appendf(nil, "distKey%d", i) + sVal := fmt.Appendf(nil, "distVal%d", i) + store := getFinalizeBlockStateCtx(suite.baseApp).KVStore(distKey1) + store.Set(sKey, sVal) + + expectedChangeSet = append(expectedChangeSet, &storetypes.StoreKVPair{ + StoreKey: distKey1.Name(), + Delete: false, + Key: sKey, + Value: sVal, + }) + + txs = append(txs, txBytes) + } + + res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1, Txs: txs}) + require.NoError(t, err) + for _, tx := range res.TxResults { + events := tx.GetEvents() + require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") + // require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") + // require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") + } + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + + require.Equal(t, expectedChangeSet, mockListener1.ChangeSet, "should contain the same changeSet") + require.Equal(t, expectedChangeSet, mockListener2.ChangeSet, "should contain the same changeSet") + } +} + +func Test_Ctx_with_StreamingManager(t *testing.T) { + mockListener1 := NewMockABCIListener("lis_1") + mockListener2 := NewMockABCIListener("lis_2") + listeners := []storetypes.ABCIListener{&mockListener1, &mockListener2} + streamingManager := 
storetypes.StreamingManager{ABCIListeners: listeners, StopNodeOnErr: true} + streamingManagerOpt := func(bapp *baseapp.BaseApp) { bapp.SetStreamingManager(streamingManager) } + addListenerOpt := func(bapp *baseapp.BaseApp) { bapp.CommitMultiStore().AddListeners([]storetypes.StoreKey{distKey1}) } + suite := NewBaseAppSuite(t, streamingManagerOpt, addListenerOpt) + + _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ + ConsensusParams: &tmproto.ConsensusParams{}, + }) + require.NoError(t, err) + + ctx := getFinalizeBlockStateCtx(suite.baseApp) + sm := ctx.StreamingManager() + require.NotNil(t, sm, fmt.Sprintf("nil StreamingManager: %v", sm)) + require.Equal(t, listeners, sm.ABCIListeners, fmt.Sprintf("should contain same listeners: %v", listeners)) + require.Equal(t, true, sm.StopNodeOnErr, "should contain StopNodeOnErr = true") + + nBlocks := 2 + + for blockN := range nBlocks { + + _, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1}) + require.NoError(t, err) + + ctx := getFinalizeBlockStateCtx(suite.baseApp) + sm := ctx.StreamingManager() + require.NotNil(t, sm, fmt.Sprintf("nil StreamingManager: %v", sm)) + require.Equal(t, listeners, sm.ABCIListeners, fmt.Sprintf("should contain same listeners: %v", listeners)) + require.Equal(t, true, sm.StopNodeOnErr, "should contain StopNodeOnErr = true") + + _, err = suite.baseApp.Commit() + require.NoError(t, err) + } +} diff --git a/baseapp/test_helpers.go b/baseapp/test_helpers.go new file mode 100644 index 0000000..fc3d11c --- /dev/null +++ b/baseapp/test_helpers.go @@ -0,0 +1,85 @@ +package baseapp + +import ( + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// SimCheck defines a CheckTx helper function that used in tests and simulations. 
+func (app *BaseApp) SimCheck(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { + // runTx expects tx bytes as argument, so we encode the tx argument into + // bytes. Note that runTx will actually decode those bytes again. But since + // this helper is only used in tests/simulation, it's fine. + bz, err := txEncoder(tx) + if err != nil { + return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) + } + + gasInfo, result, _, err := app.runTx(execModeCheck, bz, tx) + return gasInfo, result, err +} + +// Simulate executes a tx in simulate mode to get result and gas info. +func (app *BaseApp) Simulate(txBytes []byte) (sdk.GasInfo, *sdk.Result, error) { + gasInfo, result, _, err := app.runTx(execModeSimulate, txBytes, nil) + return gasInfo, result, err +} + +func (app *BaseApp) SimDeliver(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { + // See comment for Check(). + bz, err := txEncoder(tx) + if err != nil { + return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) + } + + gasInfo, result, _, err := app.runTx(execModeFinalize, bz, tx) + return gasInfo, result, err +} + +func (app *BaseApp) SimTxFinalizeBlock(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { + // See comment for Check(). + bz, err := txEncoder(tx) + if err != nil { + return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) + } + + gasInfo, result, _, err := app.runTx(execModeFinalize, bz, tx) + return gasInfo, result, err +} + +// SimWriteState is an entrypoint for simulations only. They are not executed during the normal ABCI finalize +// block step but later. Therefor an extra call to the root multi-store (app.cms) is required to write the changes. 
+func (app *BaseApp) SimWriteState() { + app.finalizeBlockState.ms.Write() +} + +// NewContextLegacy returns a new sdk.Context with the provided header +func (app *BaseApp) NewContextLegacy(isCheckTx bool, header cmtproto.Header) sdk.Context { + if isCheckTx { + return sdk.NewContext(app.checkState.ms, header, true, app.logger). + WithMinGasPrices(app.minGasPrices) + } + + return sdk.NewContext(app.finalizeBlockState.ms, header, false, app.logger) +} + +// NewContext returns a new sdk.Context with a empty header +func (app *BaseApp) NewContext(isCheckTx bool) sdk.Context { + return app.NewContextLegacy(isCheckTx, cmtproto.Header{}) +} + +func (app *BaseApp) NewUncachedContext(isCheckTx bool, header cmtproto.Header) sdk.Context { + return sdk.NewContext(app.cms, header, isCheckTx, app.logger) +} + +func (app *BaseApp) GetContextForFinalizeBlock(txBytes []byte) sdk.Context { + return app.getContextForTx(execModeFinalize, txBytes) +} + +func (app *BaseApp) GetContextForCheckTx(txBytes []byte) sdk.Context { + return app.getContextForTx(execModeCheck, txBytes) +} diff --git a/baseapp/testutil/buf.gen.yaml b/baseapp/testutil/buf.gen.yaml new file mode 100644 index 0000000..a7f430e --- /dev/null +++ b/baseapp/testutil/buf.gen.yaml @@ -0,0 +1,5 @@ +version: v1 +plugins: + - name: gocosmos + out: ../.. + opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types diff --git a/baseapp/testutil/buf.lock b/baseapp/testutil/buf.lock new file mode 100644 index 0000000..e786d26 --- /dev/null +++ b/baseapp/testutil/buf.lock @@ -0,0 +1,23 @@ +# Generated by buf. DO NOT EDIT. 
+version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + commit: 1935555c206d4afb9e94615dfd0fad31 + digest: shake256:c74d91a3ac7ae07d579e90eee33abf9b29664047ac8816500cf22c081fec0d72d62c89ce0bebafc1f6fec7aa5315be72606717740ca95007248425102c365377 + - remote: buf.build + owner: cosmos + repository: cosmos-sdk + commit: 9d547dbea90f47afbe1898388fcebffb + digest: shake256:63237398fb2043153c81bbe91ce52a832bca02d4307334b62fcc9914ce6f12fea59388eb5102949255054973f7022f581e02f97ed1f69a6585d2d00fb1da5833 + - remote: buf.build + owner: cosmos + repository: gogo-proto + commit: 5e5b9fdd01804356895f8f79a6f1ddc1 + digest: shake256:0b85da49e2e5f9ebc4806eae058e2f56096ff3b1c59d1fb7c190413dd15f45dd456f0b69ced9059341c80795d2b6c943de15b120a9e0308b499e43e4b5fc2952 + - remote: buf.build + owner: googleapis + repository: googleapis + commit: cc916c31859748a68fd229a3c8d7a2e8 + digest: shake256:469b049d0eb04203d5272062636c078decefc96fec69739159c25d85349c50c34c7706918a8b216c5c27f76939df48452148cff8c5c3ae77fa6ba5c25c1b8bf8 diff --git a/baseapp/testutil/buf.yaml b/baseapp/testutil/buf.yaml new file mode 100644 index 0000000..0a4c311 --- /dev/null +++ b/baseapp/testutil/buf.yaml @@ -0,0 +1,5 @@ +version: v1 +deps: + - buf.build/cosmos/cosmos-sdk + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-proto diff --git a/baseapp/testutil/messages.go b/baseapp/testutil/messages.go new file mode 100644 index 0000000..89807c4 --- /dev/null +++ b/baseapp/testutil/messages.go @@ -0,0 +1,65 @@ +package testutil + +import ( + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgCounter{}, + &MsgCounter2{}, + &MsgKeyValue{}, + ) 
+ msgservice.RegisterMsgServiceDesc(registry, &_Counter_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_Counter2_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_KeyValue_serviceDesc) + + codec.RegisterInterfaces(registry) +} + +var _ sdk.Msg = &MsgCounter{} + +func (msg *MsgCounter) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return errorsmod.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgCounter2{} + +func (msg *MsgCounter2) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter2) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return errorsmod.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgKeyValue{} + +func (msg *MsgKeyValue) GetSigners() []sdk.AccAddress { + if len(msg.Signer) == 0 { + return []sdk.AccAddress{} + } + + return []sdk.AccAddress{sdk.MustAccAddressFromBech32(msg.Signer)} +} + +func (msg *MsgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} diff --git a/baseapp/testutil/messages.pb.go b/baseapp/testutil/messages.pb.go new file mode 100644 index 0000000..cdc28e2 --- /dev/null +++ b/baseapp/testutil/messages.pb.go @@ -0,0 +1,1400 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: messages.proto + +package testutil + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgCounter struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgCounter) Reset() { *m = MsgCounter{} } +func (m *MsgCounter) String() string { return proto.CompactTextString(m) } +func (*MsgCounter) ProtoMessage() {} +func (*MsgCounter) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{0} +} +func (m *MsgCounter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_MsgCounter.Merge(m, src) +} +func (m *MsgCounter) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter proto.InternalMessageInfo + +func (m *MsgCounter) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +func (m *MsgCounter) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCounter2 struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgCounter2) Reset() { *m = MsgCounter2{} } +func (m *MsgCounter2) String() string { return proto.CompactTextString(m) } +func (*MsgCounter2) ProtoMessage() {} +func (*MsgCounter2) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{1} +} +func (m *MsgCounter2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter2) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter2.Merge(m, src) +} +func (m *MsgCounter2) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter2) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter2.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter2 proto.InternalMessageInfo + +func (m *MsgCounter2) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m 
*MsgCounter2) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +func (m *MsgCounter2) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCreateCounterResponse struct { +} + +func (m *MsgCreateCounterResponse) Reset() { *m = MsgCreateCounterResponse{} } +func (m *MsgCreateCounterResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCounterResponse) ProtoMessage() {} +func (*MsgCreateCounterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{2} +} +func (m *MsgCreateCounterResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCounterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCounterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCounterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCounterResponse.Merge(m, src) +} +func (m *MsgCreateCounterResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCounterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCounterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCounterResponse proto.InternalMessageInfo + +type MsgKeyValue struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgKeyValue) Reset() { *m = MsgKeyValue{} } +func (m *MsgKeyValue) String() string { return proto.CompactTextString(m) } +func (*MsgKeyValue) ProtoMessage() {} +func (*MsgKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{3} +} +func (m *MsgKeyValue) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgKeyValue.Merge(m, src) +} +func (m *MsgKeyValue) XXX_Size() int { + return m.Size() +} +func (m *MsgKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_MsgKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgKeyValue proto.InternalMessageInfo + +func (m *MsgKeyValue) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *MsgKeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MsgKeyValue) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCreateKeyValueResponse struct { +} + +func (m *MsgCreateKeyValueResponse) Reset() { *m = MsgCreateKeyValueResponse{} } +func (m *MsgCreateKeyValueResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateKeyValueResponse) ProtoMessage() {} +func (*MsgCreateKeyValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{4} +} +func (m *MsgCreateKeyValueResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateKeyValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateKeyValueResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateKeyValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateKeyValueResponse.Merge(m, src) +} +func (m *MsgCreateKeyValueResponse) XXX_Size() int { + return m.Size() +} 
+func (m *MsgCreateKeyValueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateKeyValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateKeyValueResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCounter)(nil), "MsgCounter") + proto.RegisterType((*MsgCounter2)(nil), "MsgCounter2") + proto.RegisterType((*MsgCreateCounterResponse)(nil), "MsgCreateCounterResponse") + proto.RegisterType((*MsgKeyValue)(nil), "MsgKeyValue") + proto.RegisterType((*MsgCreateKeyValueResponse)(nil), "MsgCreateKeyValueResponse") +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } + +var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ + // 390 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x92, 0xcf, 0xaa, 0xd3, 0x40, + 0x14, 0xc6, 0x1b, 0x83, 0x6d, 0x3d, 0xad, 0x5a, 0x42, 0xd1, 0x34, 0x42, 0x28, 0x5d, 0x48, 0x11, + 0x9a, 0xc1, 0xb8, 0x6b, 0x77, 0x8a, 0x54, 0x11, 0x11, 0x22, 0xb8, 0xe8, 0xa6, 0x4c, 0xd2, 0xd3, + 0x69, 0x68, 0x32, 0x13, 0x32, 0x93, 0x42, 0xb7, 0x3e, 0x81, 0x8f, 0xe2, 0x63, 0xb8, 0xec, 0xd2, + 0xa5, 0xb4, 0x0b, 0x5f, 0x43, 0xf2, 0xaf, 0x75, 0x71, 0x7b, 0xb9, 0xab, 0xbb, 0x9a, 0xf3, 0x7d, + 0x87, 0x9c, 0xdf, 0xc9, 0xc7, 0x81, 0x27, 0x31, 0x4a, 0x49, 0x19, 0x4a, 0x27, 0x49, 0x85, 0x12, + 0x56, 0x9f, 0x09, 0x26, 0x8a, 0x92, 0xe4, 0x55, 0xe5, 0x0e, 0x98, 0x10, 0x2c, 0x42, 0x52, 0x28, + 0x3f, 0x5b, 0x13, 0xca, 0xf7, 0x55, 0xeb, 0x79, 0x20, 0x64, 0x2c, 0x24, 0x89, 0x25, 0x23, 0xbb, + 0xd7, 0xf9, 0x53, 0x36, 0x46, 0x12, 0xe0, 0xb3, 0x64, 0xef, 0x44, 0xc6, 0x15, 0xa6, 0x86, 0x09, + 0xad, 0xa0, 0x2c, 0x4d, 0x6d, 0xa8, 0x8d, 0x75, 0xaf, 0x96, 0xc6, 0x4b, 0x78, 0xba, 0xa6, 0x61, + 0xb4, 0x14, 0x7c, 0xb9, 0xa1, 0x7c, 0x15, 0x61, 0x6a, 0x3e, 0x18, 0x6a, 0xe3, 0xb6, 0xf7, 0x38, + 0xb7, 0xbf, 0xf0, 0x0f, 0xa5, 0x69, 0x3c, 0x83, 0xa6, 0x0c, 0x19, 0xc7, 0xd4, 0xd4, 0x87, 0xda, + 0xf8, 0x91, 0x57, 0xa9, 0x69, 0xe7, 0xfb, 0xdf, 0x9f, 0xaf, 0x2a, 0x31, 0x52, 
0xd0, 0xb9, 0x40, + 0xdd, 0xfb, 0xa2, 0x5a, 0x60, 0xe6, 0xd4, 0x14, 0xa9, 0xc2, 0x8a, 0xed, 0xa1, 0x4c, 0x04, 0x97, + 0x38, 0x5a, 0x14, 0x1b, 0x7d, 0xc2, 0xfd, 0x37, 0x1a, 0x65, 0x68, 0xf4, 0x40, 0xdf, 0xe2, 0xbe, + 0xd8, 0xa6, 0xeb, 0xe5, 0xa5, 0xd1, 0x87, 0x87, 0xbb, 0xbc, 0x55, 0xf0, 0xbb, 0x5e, 0x29, 0xee, + 0xc6, 0x7d, 0x01, 0x83, 0x33, 0xb7, 0x26, 0xd4, 0x60, 0xf7, 0x3d, 0xb4, 0xea, 0xf0, 0xa7, 0xd0, + 0xfb, 0xc8, 0x83, 0x14, 0x63, 0xe4, 0xaa, 0xf6, 0x3a, 0xce, 0x25, 0x28, 0x6b, 0xe0, 0x5c, 0xdb, + 0xdf, 0x9d, 0x43, 0xfb, 0x1c, 0xe7, 0xec, 0x86, 0x39, 0xdd, 0xff, 0xe6, 0xb8, 0xb7, 0x0d, 0x9a, + 0x41, 0xfb, 0x9c, 0x02, 0x01, 0xfd, 0x2b, 0xaa, 0xf2, 0xdb, 0xda, 0xb4, 0x2c, 0xe7, 0xea, 0xcf, + 0xbc, 0x9d, 0xff, 0x3a, 0xda, 0xda, 0xe1, 0x68, 0x6b, 0x7f, 0x8e, 0xb6, 0xf6, 0xe3, 0x64, 0x37, + 0x0e, 0x27, 0xbb, 0xf1, 0xfb, 0x64, 0x37, 0x16, 0x13, 0x16, 0xaa, 0x4d, 0xe6, 0x3b, 0x81, 0x88, + 0x49, 0x75, 0x8a, 0xe5, 0x33, 0x91, 0xab, 0x2d, 0xf1, 0xa9, 0x44, 0x9a, 0x24, 0x44, 0xa1, 0x54, + 0x99, 0x0a, 0x23, 0xbf, 0x59, 0x1c, 0xe7, 0x9b, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x44, 0x91, + 0x2d, 0xb3, 0xf8, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// CounterClient is the client API for Counter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type CounterClient interface { + IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counterClient struct { + cc grpc1.ClientConn +} + +func NewCounterClient(cc grpc1.ClientConn) CounterClient { + return &counterClient{cc} +} + +func (c *counterClient) IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/Counter/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CounterServer is the server API for Counter service. +type CounterServer interface { + IncrementCounter(context.Context, *MsgCounter) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounterServer can be embedded to have forward compatible implementations. +type UnimplementedCounterServer struct { +} + +func (*UnimplementedCounterServer) IncrementCounter(ctx context.Context, req *MsgCounter) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounterServer(s grpc1.Server, srv CounterServer) { + s.RegisterService(&_Counter_serviceDesc, srv) +} + +func _Counter_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CounterServer).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Counter/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CounterServer).IncrementCounter(ctx, req.(*MsgCounter)) + } + return interceptor(ctx, in, info, handler) +} + +var Counter_serviceDesc = _Counter_serviceDesc +var _Counter_serviceDesc = 
grpc.ServiceDesc{ + ServiceName: "Counter", + HandlerType: (*CounterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// Counter2Client is the client API for Counter2 service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type Counter2Client interface { + IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counter2Client struct { + cc grpc1.ClientConn +} + +func NewCounter2Client(cc grpc1.ClientConn) Counter2Client { + return &counter2Client{cc} +} + +func (c *counter2Client) IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/Counter2/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Counter2Server is the server API for Counter2 service. +type Counter2Server interface { + IncrementCounter(context.Context, *MsgCounter2) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounter2Server can be embedded to have forward compatible implementations. 
+type UnimplementedCounter2Server struct { +} + +func (*UnimplementedCounter2Server) IncrementCounter(ctx context.Context, req *MsgCounter2) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounter2Server(s grpc1.Server, srv Counter2Server) { + s.RegisterService(&_Counter2_serviceDesc, srv) +} + +func _Counter2_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Counter2Server).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/Counter2/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Counter2Server).IncrementCounter(ctx, req.(*MsgCounter2)) + } + return interceptor(ctx, in, info, handler) +} + +var Counter2_serviceDesc = _Counter2_serviceDesc +var _Counter2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "Counter2", + HandlerType: (*Counter2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter2_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// KeyValueClient is the client API for KeyValue service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type KeyValueClient interface { + Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) +} + +type keyValueClient struct { + cc grpc1.ClientConn +} + +func NewKeyValueClient(cc grpc1.ClientConn) KeyValueClient { + return &keyValueClient{cc} +} + +func (c *keyValueClient) Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) { + out := new(MsgCreateKeyValueResponse) + err := c.cc.Invoke(ctx, "/KeyValue/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyValueServer is the server API for KeyValue service. +type KeyValueServer interface { + Set(context.Context, *MsgKeyValue) (*MsgCreateKeyValueResponse, error) +} + +// UnimplementedKeyValueServer can be embedded to have forward compatible implementations. +type UnimplementedKeyValueServer struct { +} + +func (*UnimplementedKeyValueServer) Set(ctx context.Context, req *MsgKeyValue) (*MsgCreateKeyValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} + +func RegisterKeyValueServer(s grpc1.Server, srv KeyValueServer) { + s.RegisterService(&_KeyValue_serviceDesc, srv) +} + +func _KeyValue_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgKeyValue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyValueServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/KeyValue/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyValueServer).Set(ctx, req.(*MsgKeyValue)) + } + return interceptor(ctx, in, info, handler) +} + +var KeyValue_serviceDesc = _KeyValue_serviceDesc +var _KeyValue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "KeyValue", + HandlerType: (*KeyValueServer)(nil), + Methods: 
[]grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _KeyValue_Set_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +func (m *MsgCounter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCounter2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateCounterResponse) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCounterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCounterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateKeyValueResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateKeyValueResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateKeyValueResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func 
encodeVarintMessages(dAtA []byte, offset int, v uint64) int { + offset -= sovMessages(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCounter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCounter2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCreateCounterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCreateKeyValueResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMessages(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessages(x uint64) (n int) { + return sovMessages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCounter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: MsgCounter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCounter2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateCounterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCounterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCounterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateKeyValueResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessages(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessages + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessages + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessages + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMessages = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessages = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessages = fmt.Errorf("proto: unexpected end of group") +) diff --git a/baseapp/testutil/messages.proto b/baseapp/testutil/messages.proto new file mode 100644 index 0000000..1ef8a4b --- /dev/null +++ b/baseapp/testutil/messages.proto @@ -0,0 +1,47 @@ 
+syntax = "proto3"; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "cosmos/msg/v1/msg.proto"; + +option go_package = "github.com/cosmos/cosmos-sdk/baseapp/testutil"; + +message MsgCounter { + option (cosmos.msg.v1.signer) = "signer"; + + int64 counter = 1; + bool fail_on_handler = 2; + string signer = 3; +} + +message MsgCounter2 { + option (cosmos.msg.v1.signer) = "signer"; + + int64 counter = 1; + bool fail_on_handler = 2; + string signer = 3; +} + +message MsgCreateCounterResponse {} + +message MsgKeyValue { + option (cosmos.msg.v1.signer) = "signer"; + + bytes key = 1; + bytes value = 2; + string signer = 3; +} + +message MsgCreateKeyValueResponse {} + +service Counter { + rpc IncrementCounter(MsgCounter) returns (MsgCreateCounterResponse); +} + +service Counter2 { + rpc IncrementCounter(MsgCounter2) returns (MsgCreateCounterResponse); +} + +service KeyValue { + rpc Set(MsgKeyValue) returns (MsgCreateKeyValueResponse); +} \ No newline at end of file diff --git a/baseapp/testutil/mock/mocks.go b/baseapp/testutil/mock/mocks.go new file mode 100644 index 0000000..b2eb1bf --- /dev/null +++ b/baseapp/testutil/mock/mocks.go @@ -0,0 +1,244 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: baseapp/abci_utils.go +// +// Generated by this command: +// +// mockgen -source=baseapp/abci_utils.go -package mock -destination baseapp/testutil/mock/mocks.go +// + +// Package mock is a generated GoMock package. +package mock + +import ( + context "context" + reflect "reflect" + + crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" + types "github.com/cosmos/cosmos-sdk/types" + gomock "go.uber.org/mock/gomock" +) + +// MockValidatorStore is a mock of ValidatorStore interface. +type MockValidatorStore struct { + ctrl *gomock.Controller + recorder *MockValidatorStoreMockRecorder + isgomock struct{} +} + +// MockValidatorStoreMockRecorder is the mock recorder for MockValidatorStore. 
+type MockValidatorStoreMockRecorder struct { + mock *MockValidatorStore +} + +// NewMockValidatorStore creates a new mock instance. +func NewMockValidatorStore(ctrl *gomock.Controller) *MockValidatorStore { + mock := &MockValidatorStore{ctrl: ctrl} + mock.recorder = &MockValidatorStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockValidatorStore) EXPECT() *MockValidatorStoreMockRecorder { + return m.recorder +} + +// GetPubKeyByConsAddr mocks base method. +func (m *MockValidatorStore) GetPubKeyByConsAddr(arg0 context.Context, arg1 types.ConsAddress) (crypto.PublicKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPubKeyByConsAddr", arg0, arg1) + ret0, _ := ret[0].(crypto.PublicKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPubKeyByConsAddr indicates an expected call of GetPubKeyByConsAddr. +func (mr *MockValidatorStoreMockRecorder) GetPubKeyByConsAddr(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPubKeyByConsAddr", reflect.TypeOf((*MockValidatorStore)(nil).GetPubKeyByConsAddr), arg0, arg1) +} + +// MockGasTx is a mock of GasTx interface. +type MockGasTx struct { + ctrl *gomock.Controller + recorder *MockGasTxMockRecorder + isgomock struct{} +} + +// MockGasTxMockRecorder is the mock recorder for MockGasTx. +type MockGasTxMockRecorder struct { + mock *MockGasTx +} + +// NewMockGasTx creates a new mock instance. +func NewMockGasTx(ctrl *gomock.Controller) *MockGasTx { + mock := &MockGasTx{ctrl: ctrl} + mock.recorder = &MockGasTxMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGasTx) EXPECT() *MockGasTxMockRecorder { + return m.recorder +} + +// GetGas mocks base method. 
+func (m *MockGasTx) GetGas() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetGas") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetGas indicates an expected call of GetGas. +func (mr *MockGasTxMockRecorder) GetGas() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGas", reflect.TypeOf((*MockGasTx)(nil).GetGas)) +} + +// MockProposalTxVerifier is a mock of ProposalTxVerifier interface. +type MockProposalTxVerifier struct { + ctrl *gomock.Controller + recorder *MockProposalTxVerifierMockRecorder + isgomock struct{} +} + +// MockProposalTxVerifierMockRecorder is the mock recorder for MockProposalTxVerifier. +type MockProposalTxVerifierMockRecorder struct { + mock *MockProposalTxVerifier +} + +// NewMockProposalTxVerifier creates a new mock instance. +func NewMockProposalTxVerifier(ctrl *gomock.Controller) *MockProposalTxVerifier { + mock := &MockProposalTxVerifier{ctrl: ctrl} + mock.recorder = &MockProposalTxVerifierMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockProposalTxVerifier) EXPECT() *MockProposalTxVerifierMockRecorder { + return m.recorder +} + +// PrepareProposalVerifyTx mocks base method. +func (m *MockProposalTxVerifier) PrepareProposalVerifyTx(tx types.Tx) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrepareProposalVerifyTx", tx) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PrepareProposalVerifyTx indicates an expected call of PrepareProposalVerifyTx. +func (mr *MockProposalTxVerifierMockRecorder) PrepareProposalVerifyTx(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProposalVerifyTx", reflect.TypeOf((*MockProposalTxVerifier)(nil).PrepareProposalVerifyTx), tx) +} + +// ProcessProposalVerifyTx mocks base method. 
+func (m *MockProposalTxVerifier) ProcessProposalVerifyTx(txBz []byte) (types.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProcessProposalVerifyTx", txBz) + ret0, _ := ret[0].(types.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProcessProposalVerifyTx indicates an expected call of ProcessProposalVerifyTx. +func (mr *MockProposalTxVerifierMockRecorder) ProcessProposalVerifyTx(txBz any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessProposalVerifyTx", reflect.TypeOf((*MockProposalTxVerifier)(nil).ProcessProposalVerifyTx), txBz) +} + +// TxDecode mocks base method. +func (m *MockProposalTxVerifier) TxDecode(txBz []byte) (types.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TxDecode", txBz) + ret0, _ := ret[0].(types.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TxDecode indicates an expected call of TxDecode. +func (mr *MockProposalTxVerifierMockRecorder) TxDecode(txBz any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxDecode", reflect.TypeOf((*MockProposalTxVerifier)(nil).TxDecode), txBz) +} + +// TxEncode mocks base method. +func (m *MockProposalTxVerifier) TxEncode(tx types.Tx) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TxEncode", tx) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// TxEncode indicates an expected call of TxEncode. +func (mr *MockProposalTxVerifierMockRecorder) TxEncode(tx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxEncode", reflect.TypeOf((*MockProposalTxVerifier)(nil).TxEncode), tx) +} + +// MockTxSelector is a mock of TxSelector interface. +type MockTxSelector struct { + ctrl *gomock.Controller + recorder *MockTxSelectorMockRecorder + isgomock struct{} +} + +// MockTxSelectorMockRecorder is the mock recorder for MockTxSelector. 
+type MockTxSelectorMockRecorder struct { + mock *MockTxSelector +} + +// NewMockTxSelector creates a new mock instance. +func NewMockTxSelector(ctrl *gomock.Controller) *MockTxSelector { + mock := &MockTxSelector{ctrl: ctrl} + mock.recorder = &MockTxSelectorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTxSelector) EXPECT() *MockTxSelectorMockRecorder { + return m.recorder +} + +// Clear mocks base method. +func (m *MockTxSelector) Clear() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Clear") +} + +// Clear indicates an expected call of Clear. +func (mr *MockTxSelectorMockRecorder) Clear() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockTxSelector)(nil).Clear)) +} + +// SelectTxForProposal mocks base method. +func (m *MockTxSelector) SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx types.Tx, txBz []byte) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectTxForProposal", ctx, maxTxBytes, maxBlockGas, memTx, txBz) + ret0, _ := ret[0].(bool) + return ret0 +} + +// SelectTxForProposal indicates an expected call of SelectTxForProposal. +func (mr *MockTxSelectorMockRecorder) SelectTxForProposal(ctx, maxTxBytes, maxBlockGas, memTx, txBz any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectTxForProposal", reflect.TypeOf((*MockTxSelector)(nil).SelectTxForProposal), ctx, maxTxBytes, maxBlockGas, memTx, txBz) +} + +// SelectedTxs mocks base method. +func (m *MockTxSelector) SelectedTxs(ctx context.Context) [][]byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SelectedTxs", ctx) + ret0, _ := ret[0].([][]byte) + return ret0 +} + +// SelectedTxs indicates an expected call of SelectedTxs. 
+func (mr *MockTxSelectorMockRecorder) SelectedTxs(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectedTxs", reflect.TypeOf((*MockTxSelector)(nil).SelectedTxs), ctx) +} diff --git a/baseapp/utils_test.go b/baseapp/utils_test.go new file mode 100644 index 0000000..2a2f811 --- /dev/null +++ b/baseapp/utils_test.go @@ -0,0 +1,439 @@ +package baseapp_test + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "net/url" + "reflect" + "strconv" + "testing" + "unsafe" + + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + cmttypes "github.com/cometbft/cometbft/types" + dbm "github.com/cosmos/cosmos-db" + "github.com/stretchr/testify/require" + + runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1" + appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1" + "cosmossdk.io/core/address" + "cosmossdk.io/core/appconfig" + "cosmossdk.io/depinject" + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + storetypes "cosmossdk.io/store/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + addresscodec "github.com/cosmos/cosmos-sdk/codec/address" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/testutil/mock" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/mempool" + signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" + _ "github.com/cosmos/cosmos-sdk/x/auth" + "github.com/cosmos/cosmos-sdk/x/auth/signing" + _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + _ 
"github.com/cosmos/cosmos-sdk/x/bank"
+	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+	_ "github.com/cosmos/cosmos-sdk/x/consensus"
+	_ "github.com/cosmos/cosmos-sdk/x/mint"
+	_ "github.com/cosmos/cosmos-sdk/x/params"
+	_ "github.com/cosmos/cosmos-sdk/x/staking"
+)
+
+var ParamStoreKey = []byte("paramstore")
+
+// GenesisStateWithSingleValidator initializes GenesisState with a single validator and genesis accounts
+// that also act as delegators.
+func GenesisStateWithSingleValidator(t *testing.T, codec codec.Codec, builder *runtime.AppBuilder) map[string]json.RawMessage {
+	t.Helper()
+
+	privVal := mock.NewPV()
+	pubKey, err := privVal.GetPubKey()
+	require.NoError(t, err)
+
+	// create validator set with single validator
+	validator := cmttypes.NewValidator(pubKey, 1)
+	valSet := cmttypes.NewValidatorSet([]*cmttypes.Validator{validator})
+
+	// generate genesis account
+	senderPrivKey := secp256k1.GenPrivKey()
+	acc := authtypes.NewBaseAccount(senderPrivKey.PubKey().Address().Bytes(), senderPrivKey.PubKey(), 0, 0)
+	balances := []banktypes.Balance{
+		{
+			Address: acc.GetAddress().String(),
+			Coins:   sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(100000000000000))),
+		},
+	}
+
+	genesisState := builder.DefaultGenesis()
+	// merge the single-validator set, its delegator account and balances into
+	// the module defaults; err from GetPubKey above is intentionally reused here
+	genesisState, err = simtestutil.GenesisStateWithValSet(codec, genesisState, valSet, []authtypes.GenesisAccount{acc}, balances...)
+ require.NoError(t, err) + + return genesisState +} + +func makeMinimalConfig() depinject.Config { + var ( + mempoolOpt = baseapp.SetMempool(mempool.NewSenderNonceMempool()) + addressCodec = func() address.Codec { return addresscodec.NewBech32Codec("cosmos") } + validatorAddressCodec = func() runtime.ValidatorAddressCodec { return addresscodec.NewBech32Codec("cosmosvaloper") } + consensusAddressCodec = func() runtime.ConsensusAddressCodec { return addresscodec.NewBech32Codec("cosmosvalcons") } + ) + + return depinject.Configs( + depinject.Supply(mempoolOpt, addressCodec, validatorAddressCodec, consensusAddressCodec), + appconfig.Compose(&appv1alpha1.Config{ + Modules: []*appv1alpha1.ModuleConfig{ + { + Name: "runtime", + Config: appconfig.WrapAny(&runtimev1alpha1.Module{ + AppName: "BaseAppApp", + }), + }, + }, + })) +} + +type MsgKeyValueImpl struct{} + +func (m MsgKeyValueImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.KVStore(capKey2).Set(msg.Key, msg.Value) + return &baseapptestutil.MsgCreateKeyValueResponse{}, nil +} + +type CounterServerImplGasMeterOnly struct { + gas uint64 +} + +func (m CounterServerImplGasMeterOnly) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + gas := m.gas + + // if no gas is provided, use the counter as gas. 
This is useful for testing + if gas == 0 { + gas = uint64(msg.Counter) + } + + sdkCtx.GasMeter().ConsumeGas(gas, "test") + return &baseapptestutil.MsgCreateCounterResponse{}, nil +} + +type mockCounterServer struct { + incrementCounterFn func(context.Context, *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) +} + +func (m mockCounterServer) IncrementCounter(ctx context.Context, req *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + if m.incrementCounterFn == nil { + panic("not expected to be called") + } + return m.incrementCounterFn(ctx, req) +} + +type NoopCounterServerImpl struct{} + +func (m NoopCounterServerImpl) IncrementCounter( + _ context.Context, + _ *baseapptestutil.MsgCounter, +) (*baseapptestutil.MsgCreateCounterResponse, error) { + return &baseapptestutil.MsgCreateCounterResponse{}, nil +} + +type CounterServerImpl struct { + t *testing.T + capKey storetypes.StoreKey + deliverKey []byte +} + +func (m CounterServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) +} + +type Counter2ServerImpl struct { + t *testing.T + capKey storetypes.StoreKey + deliverKey []byte +} + +func (m Counter2ServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter2) (*baseapptestutil.MsgCreateCounterResponse, error) { + return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) +} + +func incrementCounter(ctx context.Context, + t *testing.T, + capKey storetypes.StoreKey, + deliverKey []byte, + msg sdk.Msg, +) (*baseapptestutil.MsgCreateCounterResponse, error) { + t.Helper() + + sdkCtx := sdk.UnwrapSDKContext(ctx) + store := sdkCtx.KVStore(capKey) + + sdkCtx.GasMeter().ConsumeGas(5, "test") + + var msgCount int64 + + switch m := msg.(type) { + case *baseapptestutil.MsgCounter: + if m.FailOnHandler { + return nil, 
errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + msgCount = m.Counter + case *baseapptestutil.MsgCounter2: + if m.FailOnHandler { + return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + msgCount = m.Counter + } + + sdkCtx.EventManager().EmitEvents( + counterEvent(sdk.EventTypeMessage, msgCount), + ) + + _, err := incrementingCounter(t, store, deliverKey, msgCount) + if err != nil { + return nil, err + } + + return &baseapptestutil.MsgCreateCounterResponse{}, nil +} + +func counterEvent(evType string, msgCount int64) sdk.Events { + return sdk.Events{ + sdk.NewEvent( + evType, + sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), + ), + } +} + +func anteHandlerTxTest(t *testing.T, capKey storetypes.StoreKey, storeKey []byte) sdk.AnteHandler { + t.Helper() + + return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { + store := ctx.KVStore(capKey) + counter, failOnAnte := parseTxMemo(t, tx) + + if failOnAnte { + return ctx, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + _, err := incrementingCounter(t, store, storeKey, counter) + if err != nil { + return ctx, err + } + + ctx.EventManager().EmitEvents( + counterEvent("ante_handler", counter), + ) + + ctx = ctx.WithPriority(testTxPriority) + return ctx, nil + } +} + +func incrementingCounter(t *testing.T, store storetypes.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { + t.Helper() + + storedCounter := getIntFromStore(t, store, counterKey) + require.Equal(t, storedCounter, counter) + setIntOnStore(store, counterKey, counter+1) + return &sdk.Result{}, nil +} + +func setIntOnStore(store storetypes.KVStore, key []byte, i int64) { + bz := make([]byte, 8) + n := binary.PutVarint(bz, i) + store.Set(key, bz[:n]) +} + +type paramStore struct { + db *dbm.MemDB +} + +var _ baseapp.ParamStore = (*paramStore)(nil) + +func (ps paramStore) Set(_ context.Context, value cmtproto.ConsensusParams) 
error {
+	bz, err := json.Marshal(value)
+	if err != nil {
+		return err
+	}
+
+	return ps.db.Set(ParamStoreKey, bz)
+}
+
+// Has reports whether consensus params have been persisted to the backing MemDB.
+func (ps paramStore) Has(_ context.Context) (bool, error) {
+	return ps.db.Has(ParamStoreKey)
+}
+
+// Get loads and JSON-decodes the stored consensus params; it returns an error
+// when nothing has been persisted yet.
+func (ps paramStore) Get(_ context.Context) (cmtproto.ConsensusParams, error) {
+	bz, err := ps.db.Get(ParamStoreKey)
+	if err != nil {
+		return cmtproto.ConsensusParams{}, err
+	}
+
+	if len(bz) == 0 {
+		return cmtproto.ConsensusParams{}, errors.New("params not found")
+	}
+
+	var params cmtproto.ConsensusParams
+	// FIX: the address-of operand here was mangled to "¶ms" (an HTML-entity
+	// encoding artifact of "&para;"); it must be &params to compile.
+	if err := json.Unmarshal(bz, &params); err != nil {
+		return cmtproto.ConsensusParams{}, err
+	}
+
+	return params, nil
+}
+
+// setTxSignature attaches a deterministic test-key signature with the given
+// sequence (nonce) to the tx builder.
+func setTxSignature(t *testing.T, builder client.TxBuilder, nonce uint64) {
+	t.Helper()
+
+	privKey := secp256k1.GenPrivKeyFromSecret([]byte("test"))
+	pubKey := privKey.PubKey()
+	err := builder.SetSignatures(
+		signingtypes.SignatureV2{
+			PubKey:   pubKey,
+			Sequence: nonce,
+			Data:     &signingtypes.SingleSignatureData{},
+		},
+	)
+	require.NoError(t, err)
+}
+
+// testLoadVersionHelper asserts that the app reports the expected last block
+// height and commit ID after a (re)load.
+func testLoadVersionHelper(t *testing.T, app *baseapp.BaseApp, expectedHeight int64, expectedID storetypes.CommitID) {
+	t.Helper()
+
+	lastHeight := app.LastBlockHeight()
+	lastID := app.LastCommitID()
+	require.Equal(t, expectedHeight, lastHeight)
+	require.Equal(t, expectedID, lastID)
+}
+
+// getCheckStateCtx reads BaseApp's unexported checkState field via
+// reflect/unsafe and returns its sdk.Context — a test-only escape hatch.
+func getCheckStateCtx(app *baseapp.BaseApp) sdk.Context {
+	v := reflect.ValueOf(app).Elem()
+	f := v.FieldByName("checkState")
+	rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem()
+	return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context)
+}
+
+// getFinalizeBlockStateCtx does the same for the unexported finalizeBlockState field.
+func getFinalizeBlockStateCtx(app *baseapp.BaseApp) sdk.Context {
+	v := reflect.ValueOf(app).Elem()
+	f := v.FieldByName("finalizeBlockState")
+	rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem()
+	return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context)
+}
+
+// parseTxMemo extracts the counter and failOnAnte values encoded in the tx
+// memo as a URL query string.
+func parseTxMemo(t *testing.T, tx sdk.Tx) (counter int64, failOnAnte bool) {
+	t.Helper()
+
+	
txWithMemo, ok := tx.(sdk.TxWithMemo) + require.True(t, ok) + + memo := txWithMemo.GetMemo() + vals, err := url.ParseQuery(memo) + require.NoError(t, err) + + counter, err = strconv.ParseInt(vals.Get("counter"), 10, 64) + require.NoError(t, err) + + failOnAnte = vals.Get("failOnAnte") == "true" + return counter, failOnAnte +} + +func newTxCounter(t *testing.T, cfg client.TxConfig, counter int64, msgCounters ...int64) signing.Tx { + t.Helper() + + _, _, addr := testdata.KeyTestPubAddr() + msgs := make([]sdk.Msg, 0, len(msgCounters)) + for _, c := range msgCounters { + msg := &baseapptestutil.MsgCounter{Counter: c, FailOnHandler: false, Signer: addr.String()} + msgs = append(msgs, msg) + } + + builder := cfg.NewTxBuilder() + require.NoError(t, builder.SetMsgs(msgs...)) + builder.SetMemo("counter=" + strconv.FormatInt(counter, 10) + "&failOnAnte=false") + setTxSignature(t, builder, uint64(counter)) + + return builder.GetTx() +} + +func getIntFromStore(t *testing.T, store storetypes.KVStore, key []byte) int64 { + t.Helper() + + bz := store.Get(key) + if len(bz) == 0 { + return 0 + } + + i, err := binary.ReadVarint(bytes.NewBuffer(bz)) + require.NoError(t, err) + + return i +} + +func setFailOnAnte(t *testing.T, cfg client.TxConfig, tx signing.Tx, failOnAnte bool) signing.Tx { + t.Helper() + + builder := cfg.NewTxBuilder() + require.NoError(t, builder.SetMsgs(tx.GetMsgs()...)) + + memo := tx.GetMemo() + vals, err := url.ParseQuery(memo) + require.NoError(t, err) + + vals.Set("failOnAnte", strconv.FormatBool(failOnAnte)) + memo = vals.Encode() + builder.SetMemo(memo) + setTxSignature(t, builder, 1) + + return builder.GetTx() +} + +func setFailOnHandler(cfg client.TxConfig, tx signing.Tx, fail bool) signing.Tx { + builder := cfg.NewTxBuilder() + builder.SetMemo(tx.GetMemo()) + + msgs := tx.GetMsgs() + for i, msg := range msgs { + msgs[i] = &baseapptestutil.MsgCounter{ + Counter: msg.(*baseapptestutil.MsgCounter).Counter, + FailOnHandler: fail, + } + } + + if err := 
builder.SetMsgs(msgs...); err != nil { + panic(err) + } + return builder.GetTx() +} + +// wonkyMsg is to be used to run a MsgCounter2 message when the MsgCounter2 handler is not registered. +func wonkyMsg(t *testing.T, cfg client.TxConfig, tx signing.Tx) signing.Tx { + t.Helper() + + t.Helper() + builder := cfg.NewTxBuilder() + builder.SetMemo(tx.GetMemo()) + + msgs := tx.GetMsgs() + msgs = append(msgs, &baseapptestutil.MsgCounter2{}) + + err := builder.SetMsgs(msgs...) + require.NoError(t, err) + return builder.GetTx() +} diff --git a/buf.work.yaml b/buf.work.yaml new file mode 100644 index 0000000..87a0731 --- /dev/null +++ b/buf.work.yaml @@ -0,0 +1,3 @@ +version: v1 +directories: + - proto \ No newline at end of file diff --git a/client/account_retriever.go b/client/account_retriever.go new file mode 100644 index 0000000..eaa97b9 --- /dev/null +++ b/client/account_retriever.go @@ -0,0 +1,49 @@ +package client + +import ( + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// Account defines a read-only version of the auth module's AccountI. +type Account interface { + GetAddress() sdk.AccAddress + GetPubKey() cryptotypes.PubKey // can return nil. + GetAccountNumber() uint64 + GetSequence() uint64 +} + +// AccountRetriever defines the interfaces required by transactions to +// ensure an account exists and to be able to query for account fields necessary +// for signing. +type AccountRetriever interface { + GetAccount(clientCtx Context, addr sdk.AccAddress) (Account, error) + GetAccountWithHeight(clientCtx Context, addr sdk.AccAddress) (Account, int64, error) + EnsureExists(clientCtx Context, addr sdk.AccAddress) error + GetAccountNumberSequence(clientCtx Context, addr sdk.AccAddress) (accNum, accSeq uint64, err error) +} + +var _ AccountRetriever = (*MockAccountRetriever)(nil) + +// MockAccountRetriever defines a no-op basic AccountRetriever that can be used +// in mocked contexts. 
Tests or context that need more sophisticated testing +// state should implement their own mock AccountRetriever. +type MockAccountRetriever struct { + ReturnAccNum, ReturnAccSeq uint64 +} + +func (mar MockAccountRetriever) GetAccount(_ Context, _ sdk.AccAddress) (Account, error) { + return nil, nil +} + +func (mar MockAccountRetriever) GetAccountWithHeight(_ Context, _ sdk.AccAddress) (Account, int64, error) { + return nil, 0, nil +} + +func (mar MockAccountRetriever) EnsureExists(_ Context, _ sdk.AccAddress) error { + return nil +} + +func (mar MockAccountRetriever) GetAccountNumberSequence(_ Context, _ sdk.AccAddress) (uint64, uint64, error) { + return mar.ReturnAccNum, mar.ReturnAccSeq, nil +} diff --git a/client/broadcast.go b/client/broadcast.go new file mode 100644 index 0000000..d3ae941 --- /dev/null +++ b/client/broadcast.go @@ -0,0 +1,147 @@ +package client + +import ( + "context" + "fmt" + "strings" + + "github.com/cometbft/cometbft/mempool" + cmttypes "github.com/cometbft/cometbft/types" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx" +) + +// BroadcastTx broadcasts a transactions either synchronously or asynchronously +// based on the context parameters. The result of the broadcast is parsed into +// an intermediate structure which is logged if the context has a logger +// defined. +func (ctx Context) BroadcastTx(txBytes []byte) (res *sdk.TxResponse, err error) { + switch ctx.BroadcastMode { + case flags.BroadcastSync: + res, err = ctx.BroadcastTxSync(txBytes) + + case flags.BroadcastAsync: + res, err = ctx.BroadcastTxAsync(txBytes) + + default: + return nil, fmt.Errorf("unsupported return type %s; supported types: sync, async", ctx.BroadcastMode) + } + + return res, err +} + +// Deprecated: Use CheckCometError instead. 
+func CheckTendermintError(err error, tx cmttypes.Tx) *sdk.TxResponse {
+	return CheckCometError(err, tx)
+}
+
+// CheckCometError inspects an error returned from BroadcastTx and, when it
+// matches one of the CometBFT mempool precondition failures (tx already in
+// cache, mempool full, tx too large), translates it into a TxResponse that
+// carries the corresponding SDK ABCI code. It returns nil for a nil error or
+// for any error it does not recognize.
+//
+// TODO: Avoid brittle string matching in favor of error matching. This requires
+// a change to CometBFT's RPCError type to allow retrieval or matching against
+// a concrete error type.
+func CheckCometError(err error, tx cmttypes.Tx) *sdk.TxResponse {
+	if err == nil {
+		return nil
+	}
+
+	loweredErr := strings.ToLower(err.Error())
+	hash := fmt.Sprintf("%X", tx.Hash())
+
+	// matches reports whether the lowered error text contains the fragment.
+	matches := func(fragment string) bool {
+		return strings.Contains(loweredErr, fragment)
+	}
+	// asResponse builds a TxResponse for a recognized precondition failure.
+	asResponse := func(code uint32, codespace string) *sdk.TxResponse {
+		return &sdk.TxResponse{
+			Code:      code,
+			Codespace: codespace,
+			TxHash:    hash,
+		}
+	}
+
+	if matches(strings.ToLower(mempool.ErrTxInCache.Error())) {
+		return asResponse(sdkerrors.ErrTxInMempoolCache.ABCICode(), sdkerrors.ErrTxInMempoolCache.Codespace())
+	}
+	if matches("mempool is full") {
+		return asResponse(sdkerrors.ErrMempoolIsFull.ABCICode(), sdkerrors.ErrMempoolIsFull.Codespace())
+	}
+	if matches("tx too large") {
+		return asResponse(sdkerrors.ErrTxTooLarge.ABCICode(), sdkerrors.ErrTxTooLarge.Codespace())
+	}
+
+	return nil
+}
+
+// BroadcastTxSync broadcasts transaction bytes to a CometBFT node
+// synchronously, i.e. it returns once CheckTx has executed.
+func (ctx Context) BroadcastTxSync(txBytes []byte) (*sdk.TxResponse, error) {
+	node, nodeErr := ctx.GetNode()
+	if nodeErr != nil {
+		return nil, nodeErr
+	}
+
+	result, broadcastErr := node.BroadcastTxSync(context.Background(), txBytes)
+	// A recognized CometBFT precondition failure is reported through the
+	// response code rather than as a Go error.
+	if cometResp := CheckCometError(broadcastErr, txBytes); cometResp != nil {
+		return cometResp, nil
+	}
+
+	return sdk.NewResponseFormatBroadcastTx(result), broadcastErr
+}
+
+// BroadcastTxAsync broadcasts transaction bytes to a CometBFT node
+// asynchronously, i.e. it returns immediately without waiting for CheckTx.
+func (ctx Context) BroadcastTxAsync(txBytes []byte) (*sdk.TxResponse, error) {
+	node, nodeErr := ctx.GetNode()
+	if nodeErr != nil {
+		return nil, nodeErr
+	}
+
+	result, broadcastErr := node.BroadcastTxAsync(context.Background(), txBytes)
+	if cometResp := CheckCometError(broadcastErr, txBytes); cometResp != nil {
+		return cometResp, nil
+	}
+
+	return sdk.NewResponseFormatBroadcastTx(result), broadcastErr
+}
+
+// TxServiceBroadcast is a helper function to broadcast a Tx with the correct
+// gRPC types from the tx service. Calls `clientCtx.BroadcastTx` under the hood.
+func TxServiceBroadcast(_ context.Context, clientCtx Context, req *tx.BroadcastTxRequest) (*tx.BroadcastTxResponse, error) {
+	if req == nil || req.TxBytes == nil {
+		return nil, status.Error(codes.InvalidArgument, "invalid empty tx")
+	}
+
+	broadcastCtx := clientCtx.WithBroadcastMode(normalizeBroadcastMode(req.Mode))
+	resp, err := broadcastCtx.BroadcastTx(req.TxBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return &tx.BroadcastTxResponse{TxResponse: resp}, nil
+}
+
+// normalizeBroadcastMode converts a broadcast mode into a normalized string
+// to be passed into the clientCtx.
+func normalizeBroadcastMode(mode tx.BroadcastMode) string { + switch mode { + case tx.BroadcastMode_BROADCAST_MODE_ASYNC: + return "async" + case tx.BroadcastMode_BROADCAST_MODE_SYNC: + return "sync" + default: + return "unspecified" + } +} diff --git a/client/broadcast_test.go b/client/broadcast_test.go new file mode 100644 index 0000000..a832af3 --- /dev/null +++ b/client/broadcast_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "context" + "fmt" + "testing" + + "github.com/cometbft/cometbft/crypto/tmhash" + "github.com/cometbft/cometbft/mempool" + "github.com/cometbft/cometbft/rpc/client/mock" + coretypes "github.com/cometbft/cometbft/rpc/core/types" + cmttypes "github.com/cometbft/cometbft/types" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client/flags" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +type MockClient struct { + mock.Client + err error +} + +func (c MockClient) BroadcastTxAsync(_ context.Context, _ cmttypes.Tx) (*coretypes.ResultBroadcastTx, error) { + return nil, c.err +} + +func (c MockClient) BroadcastTxSync(_ context.Context, _ cmttypes.Tx) (*coretypes.ResultBroadcastTx, error) { + return nil, c.err +} + +func CreateContextWithErrorAndMode(err error, mode string) Context { + return Context{ + Client: MockClient{err: err}, + BroadcastMode: mode, + } +} + +// Test the correct code is returned when +func TestBroadcastError(t *testing.T) { + errors := map[error]uint32{ + mempool.ErrTxInCache: sdkerrors.ErrTxInMempoolCache.ABCICode(), + mempool.ErrTxTooLarge{}: sdkerrors.ErrTxTooLarge.ABCICode(), + mempool.ErrMempoolIsFull{}: sdkerrors.ErrMempoolIsFull.ABCICode(), + } + + modes := []string{ + flags.BroadcastAsync, + flags.BroadcastSync, + } + + txBytes := []byte{0xA, 0xB} + txHash := fmt.Sprintf("%X", tmhash.Sum(txBytes)) + + for _, mode := range modes { + for err, code := range errors { + ctx := CreateContextWithErrorAndMode(err, mode) + resp, returnedErr := ctx.BroadcastTx(txBytes) + 
require.NoError(t, returnedErr) + require.Equal(t, code, resp.Code) + require.NotEmpty(t, resp.Codespace) + require.Equal(t, txHash, resp.TxHash) + } + } +} diff --git a/client/cmd.go b/client/cmd.go new file mode 100644 index 0000000..1dc62de --- /dev/null +++ b/client/cmd.go @@ -0,0 +1,375 @@ +package client + +import ( + "context" + "crypto/tls" + "fmt" + "slices" + "strings" + + "github.com/cockroachdb/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + signingv1beta1 "cosmossdk.io/api/cosmos/tx/signing/v1beta1" + + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// ClientContextKey defines the context key used to retrieve a client.Context from +// a command's Context. +const ClientContextKey = sdk.ContextKey("client.context") + +// SetCmdClientContextHandler is to be used in a command pre-hook execution to +// read flags that populate a Context and sets that to the command's Context. 
+func SetCmdClientContextHandler(clientCtx Context, cmd *cobra.Command) (err error) { + clientCtx, err = ReadPersistentCommandFlags(clientCtx, cmd.Flags()) + if err != nil { + return err + } + + return SetCmdClientContext(cmd, clientCtx) +} + +// ValidateCmd returns unknown command error or Help display if help flag set +func ValidateCmd(cmd *cobra.Command, args []string) error { + var unknownCmd string + var skipNext bool + + for _, arg := range args { + // search for help flag + if arg == "--help" || arg == "-h" { + return cmd.Help() + } + + // check if the current arg is a flag + switch { + case len(arg) > 0 && (arg[0] == '-'): + // the next arg should be skipped if the current arg is a + // flag and does not use "=" to assign the flag's value + if !strings.Contains(arg, "=") { + skipNext = true + } else { + skipNext = false + } + case skipNext: + // skip current arg + skipNext = false + case unknownCmd == "": + // unknown command found + // continue searching for help flag + unknownCmd = arg + } + } + + // return the help screen if no unknown command is found + if unknownCmd != "" { + err := fmt.Sprintf("unknown command \"%s\" for \"%s\"", unknownCmd, cmd.CalledAs()) + + // build suggestions for unknown argument + if suggestions := cmd.SuggestionsFor(unknownCmd); len(suggestions) > 0 { + err += "\n\nDid you mean this?\n" + for _, s := range suggestions { + err += fmt.Sprintf("\t%v\n", s) + } + } + return errors.New(err) + } + + return cmd.Help() +} + +// ReadPersistentCommandFlags returns a Context with fields set for "persistent" +// or common flags that do not necessarily change with context. +// +// Note, the provided clientCtx may have field pre-populated. 
The following order +// of precedence occurs: +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func ReadPersistentCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) { + if clientCtx.OutputFormat == "" || flagSet.Changed(flags.FlagOutput) { + output, _ := flagSet.GetString(flags.FlagOutput) + clientCtx = clientCtx.WithOutputFormat(output) + } + + if clientCtx.HomeDir == "" || flagSet.Changed(flags.FlagHome) { + homeDir, _ := flagSet.GetString(flags.FlagHome) + clientCtx = clientCtx.WithHomeDir(homeDir) + } + + if !clientCtx.Simulate || flagSet.Changed(flags.FlagDryRun) { + dryRun, _ := flagSet.GetBool(flags.FlagDryRun) + clientCtx = clientCtx.WithSimulation(dryRun) + } + + if clientCtx.KeyringDir == "" || flagSet.Changed(flags.FlagKeyringDir) { + keyringDir, _ := flagSet.GetString(flags.FlagKeyringDir) + + // The keyring directory is optional and falls back to the home directory + // if omitted. 
+ if keyringDir == "" { + keyringDir = clientCtx.HomeDir + } + + clientCtx = clientCtx.WithKeyringDir(keyringDir) + } + + if clientCtx.ChainID == "" || flagSet.Changed(flags.FlagChainID) { + chainID, _ := flagSet.GetString(flags.FlagChainID) + clientCtx = clientCtx.WithChainID(chainID) + } + + if clientCtx.Keyring == nil || flagSet.Changed(flags.FlagKeyringBackend) { + keyringBackend, _ := flagSet.GetString(flags.FlagKeyringBackend) + + if keyringBackend != "" { + kr, err := NewKeyringFromBackend(clientCtx, keyringBackend) + if err != nil { + return clientCtx, err + } + + clientCtx = clientCtx.WithKeyring(kr) + } + } + + if clientCtx.Client == nil || flagSet.Changed(flags.FlagNode) { + rpcURI, _ := flagSet.GetString(flags.FlagNode) + if rpcURI != "" { + clientCtx = clientCtx.WithNodeURI(rpcURI) + + client, err := NewClientFromNode(rpcURI) + if err != nil { + return clientCtx, err + } + + clientCtx = clientCtx.WithClient(client) + } + } + + if clientCtx.GRPCClient == nil || flagSet.Changed(flags.FlagGRPC) { + grpcURI, _ := flagSet.GetString(flags.FlagGRPC) + if grpcURI != "" { + var dialOpts []grpc.DialOption + + useInsecure, _ := flagSet.GetBool(flags.FlagGRPCInsecure) + if useInsecure { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{ + MinVersion: tls.VersionTLS12, + }))) + } + + grpcClient, err := grpc.Dial(grpcURI, dialOpts...) // nolint:staticcheck // grpc.Dial is deprecated but we still use it + if err != nil { + return Context{}, err + } + clientCtx = clientCtx.WithGRPCClient(grpcClient) + } + } + + return clientCtx, nil +} + +// readQueryCommandFlags returns an updated Context with fields set based on flags +// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails. +// +// Note, the provided clientCtx may have field pre-populated. 
The following order +// of precedence occurs: +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func readQueryCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) { + if clientCtx.Height == 0 || flagSet.Changed(flags.FlagHeight) { + height, _ := flagSet.GetInt64(flags.FlagHeight) + clientCtx = clientCtx.WithHeight(height) + } + + if !clientCtx.UseLedger || flagSet.Changed(flags.FlagUseLedger) { + useLedger, _ := flagSet.GetBool(flags.FlagUseLedger) + clientCtx = clientCtx.WithUseLedger(useLedger) + } + + return ReadPersistentCommandFlags(clientCtx, flagSet) +} + +// readTxCommandFlags returns an updated Context with fields set based on flags +// defined in AddTxFlagsToCmd. An error is returned if any flag query fails. +// +// Note, the provided clientCtx may have field pre-populated. 
// The following order of precedence occurs:
//
// - client.Context field not pre-populated & flag not set: uses default flag value
// - client.Context field not pre-populated & flag set: uses set flag value
// - client.Context field pre-populated & flag not set: uses pre-populated value
// - client.Context field pre-populated & flag set: uses set flag value
func readTxCommandFlags(clientCtx Context, flagSet *pflag.FlagSet) (Context, error) {
	clientCtx, err := ReadPersistentCommandFlags(clientCtx, flagSet)
	if err != nil {
		return clientCtx, err
	}

	// Each field below follows the same pattern: the flag value wins when the
	// field is unset (zero value) or the flag was explicitly changed.
	if !clientCtx.GenerateOnly || flagSet.Changed(flags.FlagGenerateOnly) {
		genOnly, _ := flagSet.GetBool(flags.FlagGenerateOnly)
		clientCtx = clientCtx.WithGenerateOnly(genOnly)
	}

	if !clientCtx.Offline || flagSet.Changed(flags.FlagOffline) {
		offline, _ := flagSet.GetBool(flags.FlagOffline)
		clientCtx = clientCtx.WithOffline(offline)
	}

	if !clientCtx.UseLedger || flagSet.Changed(flags.FlagUseLedger) {
		useLedger, _ := flagSet.GetBool(flags.FlagUseLedger)
		clientCtx = clientCtx.WithUseLedger(useLedger)
	}

	if clientCtx.BroadcastMode == "" || flagSet.Changed(flags.FlagBroadcastMode) {
		bMode, _ := flagSet.GetString(flags.FlagBroadcastMode)
		clientCtx = clientCtx.WithBroadcastMode(bMode)
	}

	if !clientCtx.SkipConfirm || flagSet.Changed(flags.FlagSkipConfirmation) {
		skipConfirm, _ := flagSet.GetBool(flags.FlagSkipConfirmation)
		clientCtx = clientCtx.WithSkipConfirmation(skipConfirm)
	}

	if clientCtx.SignModeStr == "" || flagSet.Changed(flags.FlagSignMode) {
		signModeStr, _ := flagSet.GetString(flags.FlagSignMode)
		clientCtx = clientCtx.WithSignModeStr(signModeStr)
	}

	if clientCtx.FeePayer == nil || flagSet.Changed(flags.FlagFeePayer) {
		payer, _ := flagSet.GetString(flags.FlagFeePayer)

		if payer != "" {
			payerAcc, err := sdk.AccAddressFromBech32(payer)
			if err != nil {
				return clientCtx, err
			}

			clientCtx = clientCtx.WithFeePayerAddress(payerAcc)
		}
	}

	if clientCtx.FeeGranter == nil || flagSet.Changed(flags.FlagFeeGranter) {
		granter, _ := flagSet.GetString(flags.FlagFeeGranter)

		if granter != "" {
			granterAcc, err := sdk.AccAddressFromBech32(granter)
			if err != nil {
				return clientCtx, err
			}

			clientCtx = clientCtx.WithFeeGranterAddress(granterAcc)
		}
	}

	if clientCtx.From == "" || flagSet.Changed(flags.FlagFrom) {
		from, _ := flagSet.GetString(flags.FlagFrom)
		fromAddr, fromName, keyType, err := GetFromFields(clientCtx, clientCtx.Keyring, from)
		if err != nil {
			return clientCtx, fmt.Errorf("failed to convert address field to address: %w", err)
		}

		clientCtx = clientCtx.WithFrom(from).WithFromAddress(fromAddr).WithFromName(fromName)

		// Textual sign mode with a Ledger key requires the tx config to
		// actually support SIGN_MODE_TEXTUAL.
		if keyType == keyring.TypeLedger && clientCtx.SignModeStr == flags.SignModeTextual {
			if !slices.Contains(clientCtx.TxConfig.SignModeHandler().SupportedModes(), signingv1beta1.SignMode_SIGN_MODE_TEXTUAL) {
				return clientCtx, fmt.Errorf("SIGN_MODE_TEXTUAL is not available")
			}
		}

		// If the `from` signer account is a ledger key, we need to use
		// SIGN_MODE_AMINO_JSON, because ledger doesn't support proto yet.
		// ref: https://github.com/cosmos/cosmos-sdk/issues/8109
		if keyType == keyring.TypeLedger &&
			clientCtx.SignModeStr != flags.SignModeLegacyAminoJSON &&
			clientCtx.SignModeStr != flags.SignModeTextual &&
			!clientCtx.LedgerHasProtobuf {
			fmt.Println("Default sign-mode 'direct' not supported by Ledger, using sign-mode 'amino-json'.")
			clientCtx = clientCtx.WithSignModeStr(flags.SignModeLegacyAminoJSON)
		}
	}

	if !clientCtx.IsAux || flagSet.Changed(flags.FlagAux) {
		isAux, _ := flagSet.GetBool(flags.FlagAux)
		clientCtx = clientCtx.WithAux(isAux)
		if isAux {
			// If the user didn't explicitly set an --output flag, use JSON by default.
			if clientCtx.OutputFormat == "" || !flagSet.Changed(flags.FlagOutput) {
				clientCtx = clientCtx.WithOutputFormat(flags.OutputFormatJSON)
			}

			// If the user didn't explicitly set a --sign-mode flag, use DIRECT_AUX by default.
			if clientCtx.SignModeStr == "" || !flagSet.Changed(flags.FlagSignMode) {
				clientCtx = clientCtx.WithSignModeStr(flags.SignModeDirectAux)
			}
		}
	}

	return clientCtx, nil
}

// GetClientQueryContext returns a Context from a command with fields set based on flags
// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails.
//
// - client.Context field not pre-populated & flag not set: uses default flag value
// - client.Context field not pre-populated & flag set: uses set flag value
// - client.Context field pre-populated & flag not set: uses pre-populated value
// - client.Context field pre-populated & flag set: uses set flag value
func GetClientQueryContext(cmd *cobra.Command) (Context, error) {
	ctx := GetClientContextFromCmd(cmd)
	return readQueryCommandFlags(ctx, cmd.Flags())
}

// GetClientTxContext returns a Context from a command with fields set based on flags
// defined in AddTxFlagsToCmd. An error is returned if any flag query fails.
//
// - client.Context field not pre-populated & flag not set: uses default flag value
// - client.Context field not pre-populated & flag set: uses set flag value
// - client.Context field pre-populated & flag not set: uses pre-populated value
// - client.Context field pre-populated & flag set: uses set flag value
func GetClientTxContext(cmd *cobra.Command) (Context, error) {
	ctx := GetClientContextFromCmd(cmd)
	return readTxCommandFlags(ctx, cmd.Flags())
}

// GetClientContextFromCmd returns a Context from a command or an empty Context
// if it has not been set.
+func GetClientContextFromCmd(cmd *cobra.Command) Context { + if v := cmd.Context().Value(ClientContextKey); v != nil { + clientCtxPtr := v.(*Context) + return *clientCtxPtr + } + + return Context{} +} + +// SetCmdClientContext sets a command's Context value to the provided argument. +// If the context has not been set, set the given context as the default. +func SetCmdClientContext(cmd *cobra.Command, clientCtx Context) error { + cmdCtx := cmd.Context() + if cmdCtx == nil { + cmdCtx = context.Background() + } + + v := cmd.Context().Value(ClientContextKey) + if clientCtxPtr, ok := v.(*Context); ok { + *clientCtxPtr = clientCtx + } else { + cmd.SetContext(context.WithValue(cmdCtx, ClientContextKey, &clientCtx)) + } + + return nil +} diff --git a/client/cmd_test.go b/client/cmd_test.go new file mode 100644 index 0000000..864037f --- /dev/null +++ b/client/cmd_test.go @@ -0,0 +1,139 @@ +package client_test + +import ( + "context" + "fmt" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/testutil" +) + +func TestValidateCmd(t *testing.T) { + // setup root and subcommands + rootCmd := &cobra.Command{ + Use: "root", + } + queryCmd := &cobra.Command{ + Use: "query", + } + rootCmd.AddCommand(queryCmd) + + // command being tested + distCmd := &cobra.Command{ + Use: "distr", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + } + queryCmd.AddCommand(distCmd) + + commissionCmd := &cobra.Command{ + Use: "commission", + } + distCmd.AddCommand(commissionCmd) + + tests := []struct { + reason string + args []string + wantErr bool + }{ + {"misspelled command", []string{"COMMISSION"}, true}, + {"no command provided", []string{}, false}, + {"help flag", []string{"COMMISSION", "--help"}, false}, + {"shorthand help flag", []string{"COMMISSION", "-h"}, false}, + {"flag only, no command provided", []string{"--gas", "1000atom"}, 
			false},
		{"flag and misspelled command", []string{"--gas", "1000atom", "COMMISSION"}, true},
	}

	for _, tt := range tests {
		err := client.ValidateCmd(distCmd, tt.args)
		require.Equal(t, tt.wantErr, err != nil, tt.reason)
	}
}

// TestSetCmdClientContextHandler drives SetCmdClientContextHandler through a
// cobra PreRunE hook and verifies the context a command ends up with for
// several starting command contexts (pre-populated, nil, and invalid values).
func TestSetCmdClientContextHandler(t *testing.T) {
	initClientCtx := client.Context{}.WithHomeDir("/foo/bar").WithChainID("test-chain").WithKeyringDir("/foo/bar")

	newCmd := func() *cobra.Command {
		c := &cobra.Command{
			PreRunE: func(cmd *cobra.Command, args []string) error {
				return client.SetCmdClientContextHandler(initClientCtx, cmd)
			},
			RunE: func(cmd *cobra.Command, _ []string) error {
				_, err := client.GetClientTxContext(cmd)
				return err
			},
		}

		c.Flags().String(flags.FlagChainID, "", "network chain ID")
		c.Flags().String(flags.FlagHome, "", "home dir")

		return c
	}

	testCases := []struct {
		name            string
		expectedContext client.Context
		args            []string
		ctx             context.Context
	}{
		{
			"no flags set",
			initClientCtx,
			[]string{},
			context.WithValue(context.Background(), client.ClientContextKey, &client.Context{}),
		},
		{
			"flags set",
			initClientCtx.WithChainID("new-chain-id"),
			[]string{
				fmt.Sprintf("--%s=new-chain-id", flags.FlagChainID),
			},
			context.WithValue(context.Background(), client.ClientContextKey, &client.Context{}),
		},
		{
			"flags set with space",
			initClientCtx.WithHomeDir("/tmp/dir"),
			[]string{
				fmt.Sprintf("--%s", flags.FlagHome),
				"/tmp/dir",
			},
			context.Background(),
		},
		{
			"no context provided",
			initClientCtx.WithHomeDir("/tmp/noctx"),
			[]string{
				fmt.Sprintf("--%s", flags.FlagHome),
				"/tmp/noctx",
			},
			nil,
		},
		{
			"with invalid client value in the context",
			initClientCtx.WithHomeDir("/tmp/invalid"),
			[]string{
				fmt.Sprintf("--%s", flags.FlagHome),
				"/tmp/invalid",
			},
			context.WithValue(context.Background(), client.ClientContextKey, "invalid"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			cmd := newCmd()
			_ = testutil.ApplyMockIODiscardOutErr(cmd)
			cmd.SetArgs(tc.args)

			require.NoError(t, cmd.ExecuteContext(tc.ctx))

			clientCtx := client.GetClientContextFromCmd(cmd)
			require.Equal(t, tc.expectedContext, clientCtx)
		})
	}
}
diff --git a/client/cometbft.go b/client/cometbft.go
new file mode 100644
index 0000000..3dc7da9
--- /dev/null
+++ b/client/cometbft.go
@@ -0,0 +1,36 @@
package client

import (
	"context"

	rpcclient "github.com/cometbft/cometbft/rpc/client"
	coretypes "github.com/cometbft/cometbft/rpc/core/types"
)

// CometRPC defines the interface of a CometBFT RPC client needed for
// queries and transaction handling.
type CometRPC interface {
	rpcclient.ABCIClient

	Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error)
	Status(context.Context) (*coretypes.ResultStatus, error)
	Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error)
	BlockByHash(ctx context.Context, hash []byte) (*coretypes.ResultBlock, error)
	BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error)
	BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error)
	Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error)
	Tx(ctx context.Context, hash []byte, prove bool) (*coretypes.ResultTx, error)
	TxSearch(
		ctx context.Context,
		query string,
		prove bool,
		page, perPage *int,
		orderBy string,
	) (*coretypes.ResultTxSearch, error)
	BlockSearch(
		ctx context.Context,
		query string,
		page, perPage *int,
		orderBy string,
	) (*coretypes.ResultBlockSearch, error)
}
diff --git a/client/config/config.go b/client/config/config.go
new file mode 100644
index 0000000..656513a
--- /dev/null
+++ b/client/config/config.go
@@ -0,0 +1,121 @@
package config

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/cosmos/cosmos-sdk/client"
)

// DefaultConfig returns default config for
// the client.toml
func DefaultConfig() *ClientConfig {
	return &ClientConfig{
		ChainID:               "",
		KeyringBackend:        "os",
		KeyringDefaultKeyName: "",
		Output:                "text",
		Node:                  "tcp://localhost:26657",
		BroadcastMode:         "sync",
	}
}

// ClientConfig models the client.toml file; field tags drive both the viper
// (mapstructure) unmarshal in getClientConfig and JSON serialization.
type ClientConfig struct {
	ChainID               string `mapstructure:"chain-id" json:"chain-id"`
	KeyringBackend        string `mapstructure:"keyring-backend" json:"keyring-backend"`
	KeyringDefaultKeyName string `mapstructure:"keyring-default-keyname" json:"keyring-default-keyname"`
	Output                string `mapstructure:"output" json:"output"`
	Node                  string `mapstructure:"node" json:"node"`
	BroadcastMode         string `mapstructure:"broadcast-mode" json:"broadcast-mode"`
}

// SetChainID sets the chain ID on the config.
func (c *ClientConfig) SetChainID(chainID string) {
	c.ChainID = chainID
}

// SetKeyringBackend sets the keyring backend on the config.
func (c *ClientConfig) SetKeyringBackend(keyringBackend string) {
	c.KeyringBackend = keyringBackend
}

// SetOutput sets the output format on the config.
func (c *ClientConfig) SetOutput(output string) {
	c.Output = output
}

// SetNode sets the node endpoint on the config.
func (c *ClientConfig) SetNode(node string) {
	c.Node = node
}

// SetBroadcastMode sets the broadcast mode on the config.
func (c *ClientConfig) SetBroadcastMode(broadcastMode string) {
	c.BroadcastMode = broadcastMode
}

// ReadDefaultValuesFromDefaultClientConfig reads default values from default client.toml file and updates them in client.Context
// The client.toml is then discarded.
func ReadDefaultValuesFromDefaultClientConfig(ctx client.Context) (client.Context, error) {
	prevHomeDir := ctx.HomeDir
	// Use a throwaway home dir so the generated client.toml never persists.
	dir, err := os.MkdirTemp("", "simapp")
	if err != nil {
		return ctx, fmt.Errorf("couldn't create temp dir: %w", err)
	}
	defer os.RemoveAll(dir)

	ctx.HomeDir = dir
	ctx, err = ReadFromClientConfig(ctx)
	if err != nil {
		return ctx, fmt.Errorf("couldn't create client config: %w", err)
	}

	ctx.HomeDir = prevHomeDir
	return ctx, nil
}

// ReadFromClientConfig reads values from client.toml file and updates them in client Context
func ReadFromClientConfig(ctx client.Context) (client.Context, error) {
	configPath := filepath.Join(ctx.HomeDir, "config")
	configFilePath := filepath.Join(configPath, "client.toml")
	conf := DefaultConfig()

	// when client.toml does not exist create and init with default values
	if _, err := os.Stat(configFilePath); os.IsNotExist(err) {
		if err := os.MkdirAll(configPath, os.ModePerm); err != nil {
			return ctx, fmt.Errorf("couldn't make client config: %w", err)
		}

		if ctx.ChainID != "" {
			conf.ChainID = ctx.ChainID // chain-id will be written to the client.toml while initiating the chain.
		}

		if err := writeConfigToFile(configFilePath, conf); err != nil {
			return ctx, fmt.Errorf("could not write client config to the file: %w", err)
		}
	}

	conf, err := getClientConfig(configPath, ctx.Viper)
	if err != nil {
		return ctx, fmt.Errorf("couldn't get client config: %w", err)
	}
	// we need to update KeyringDir field on Client Context first cause it is used in NewKeyringFromBackend
	ctx = ctx.WithOutputFormat(conf.Output).
		WithChainID(conf.ChainID).
		WithKeyringDir(ctx.HomeDir).
+ WithKeyringDefaultKeyName(conf.KeyringDefaultKeyName) + + keyring, err := client.NewKeyringFromBackend(ctx, conf.KeyringBackend) + if err != nil { + return ctx, fmt.Errorf("couldn't get keyring: %w", err) + } + + ctx = ctx.WithKeyring(keyring) + + // https://github.com/cosmos/cosmos-sdk/issues/8986 + client, err := client.NewClientFromNode(conf.Node) + if err != nil { + return ctx, fmt.Errorf("couldn't get client from nodeURI: %w", err) + } + + ctx = ctx.WithNodeURI(conf.Node). + WithClient(client). + WithBroadcastMode(conf.BroadcastMode) + + return ctx, nil +} diff --git a/client/config/config_test.go b/client/config/config_test.go new file mode 100644 index 0000000..cd813f2 --- /dev/null +++ b/client/config/config_test.go @@ -0,0 +1,96 @@ +package config_test + +import ( + "fmt" + "os" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/config" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" +) + +const ( + chainID = "test-chain" + nodeEnv = "CONFIG_TEST_NODE" + testNode1 = "http://localhost:1" + testNode2 = "http://localhost:2" +) + +// initClientContext initiates client Context for tests +func initClientContext(t *testing.T, envVar string) (client.Context, func()) { + t.Helper() + home := t.TempDir() + clientCtx := client.Context{}. + WithHomeDir(home). + WithViper(""). + WithCodec(codec.NewProtoCodec(codectypes.NewInterfaceRegistry())). 
		WithChainID(chainID)

	if envVar != "" {
		require.NoError(t, os.Setenv(nodeEnv, envVar))
	}

	clientCtx, err := config.ReadFromClientConfig(clientCtx)
	require.NoError(t, err)
	require.Equal(t, clientCtx.ChainID, chainID)

	// Returned cleanup removes the temp home and unsets the env var.
	return clientCtx, func() {
		_ = os.RemoveAll(home)
		_ = os.Unsetenv(nodeEnv)
	}
}

// TestConfigCmdEnvFlag verifies node-endpoint resolution precedence:
// an explicit --node flag wins over the environment variable, which wins
// over the client.toml default.
func TestConfigCmdEnvFlag(t *testing.T) {
	tt := []struct {
		name    string
		envVar  string
		args    []string
		expNode string
	}{
		{"env var is set with no flag", testNode1, []string{}, testNode1},
		{"env var is set with a flag", testNode1, []string{fmt.Sprintf("--%s=%s", flags.FlagNode, testNode2)}, testNode2},
		{"env var is not set with no flag", "", []string{}, "tcp://localhost:26657"},
		{"env var is not set with a flag", "", []string{fmt.Sprintf("--%s=%s", flags.FlagNode, testNode2)}, testNode2},
	}

	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			testCmd := &cobra.Command{
				Use: "test",
				RunE: func(cmd *cobra.Command, args []string) error {
					clientCtx, err := client.GetClientQueryContext(cmd)
					if err != nil {
						return err
					}

					// The resolved node URI is smuggled out through the error
					// so the test can assert on it below.
					return fmt.Errorf("%s", clientCtx.NodeURI)
				},
			}
			flags.AddQueryFlagsToCmd(testCmd)

			clientCtx, cleanup := initClientContext(t, tc.envVar)
			defer func() {
				cleanup()
			}()
			/*
				env var is set with a flag

				NODE=http://localhost:1 test-cmd --node http://localhost:2
				Prints "http://localhost:2"

				It prints http://localhost:2 cause a flag has the higher priority than env variable.
			*/

			_, err := clitestutil.ExecTestCLICmd(clientCtx, testCmd, tc.args)
			require.Error(t, err)
			require.Contains(t, err.Error(), tc.expNode)
		})
	}
}
diff --git a/client/config/toml.go b/client/config/toml.go
new file mode 100644
index 0000000..0da4ac6
--- /dev/null
+++ b/client/config/toml.go
@@ -0,0 +1,66 @@
package config

import (
	"bytes"
	"os"
	"text/template"

	"github.com/spf13/viper"
)

const defaultConfigTemplate = `# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml + +############################################################################### +### Client Configuration ### +############################################################################### + +# The network chain ID +chain-id = "{{ .ChainID }}" +# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) +keyring-backend = "{{ .KeyringBackend }}" +# Default key name, if set, defines the default key to use for signing transaction when the --from flag is not specified +keyring-default-keyname = "{{ .KeyringDefaultKeyName }}" +# CLI output format (text|json) +output = "{{ .Output }}" +# : to CometBFT RPC interface for this chain +node = "{{ .Node }}" +# Transaction broadcasting mode (sync|async) +broadcast-mode = "{{ .BroadcastMode }}" +` + +// writeConfigToFile parses defaultConfigTemplate, renders config using the template and writes it to +// configFilePath. +func writeConfigToFile(configFilePath string, config *ClientConfig) error { + var buffer bytes.Buffer + + tmpl := template.New("clientConfigFileTemplate") + configTemplate, err := tmpl.Parse(defaultConfigTemplate) + if err != nil { + return err + } + + if err := configTemplate.Execute(&buffer, config); err != nil { + return err + } + + return os.WriteFile(configFilePath, buffer.Bytes(), 0o600) +} + +// getClientConfig reads values from client.toml file and unmarshalls them into ClientConfig +func getClientConfig(configPath string, v *viper.Viper) (*ClientConfig, error) { + v.AddConfigPath(configPath) + v.SetConfigName("client") + v.SetConfigType("toml") + + if err := v.ReadInConfig(); err != nil { + return nil, err + } + + conf := DefaultConfig() + if err := v.Unmarshal(conf); err != nil { + return nil, err + } + + return conf, nil +} diff --git a/client/context.go b/client/context.go new file mode 100644 index 0000000..47fdaa7 --- /dev/null +++ b/client/context.go @@ -0,0 +1,462 @@ +package client + +import ( + 
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"strings"

	"github.com/cosmos/gogoproto/proto"
	"github.com/spf13/viper"
	"google.golang.org/grpc"
	"sigs.k8s.io/yaml"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
	"github.com/cosmos/cosmos-sdk/crypto/keyring"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// PreprocessTxFn defines a hook by which chains can preprocess transactions before broadcasting
type PreprocessTxFn func(chainID string, key keyring.KeyType, tx TxBuilder) error

// Context implements a typical context created in SDK modules for transaction
// handling and queries.
type Context struct {
	FromAddress           sdk.AccAddress
	Client                CometRPC
	GRPCClient            *grpc.ClientConn
	GRPCConnProvider      *GRPCConnProvider
	ChainID               string
	Codec                 codec.Codec
	InterfaceRegistry     codectypes.InterfaceRegistry
	Input                 io.Reader
	Keyring               keyring.Keyring
	KeyringOptions        []keyring.Option
	KeyringDir            string
	KeyringDefaultKeyName string
	Output                io.Writer
	OutputFormat          string
	Height                int64
	HomeDir               string
	From                  string
	BroadcastMode         string
	FromName              string
	SignModeStr           string
	UseLedger             bool
	Simulate              bool
	GenerateOnly          bool
	Offline               bool
	SkipConfirm           bool
	TxConfig              TxConfig
	AccountRetriever      AccountRetriever
	NodeURI               string
	FeePayer              sdk.AccAddress
	FeeGranter            sdk.AccAddress
	Viper                 *viper.Viper
	LedgerHasProtobuf     bool
	PreprocessTxHook      PreprocessTxFn

	// IsAux is true when the signer is an auxiliary signer (e.g. the tipper).
	IsAux bool

	// TODO: Deprecated (remove).
	LegacyAmino *codec.LegacyAmino

	// CmdContext is the context.Context from the Cobra command.
	CmdContext context.Context
}

// WithCmdContext returns a copy of the context with an updated context.Context,
// usually set to the cobra cmd context.
func (ctx Context) WithCmdContext(c context.Context) Context {
	ctx.CmdContext = c
	return ctx
}

// WithKeyring returns a copy of the context with an updated keyring.
func (ctx Context) WithKeyring(k keyring.Keyring) Context {
	ctx.Keyring = k
	return ctx
}

// WithKeyringOptions returns a copy of the context with updated keyring options.
func (ctx Context) WithKeyringOptions(opts ...keyring.Option) Context {
	ctx.KeyringOptions = opts
	return ctx
}

// WithInput returns a copy of the context with an updated input.
func (ctx Context) WithInput(r io.Reader) Context {
	// convert to a bufio.Reader to have a shared buffer between the keyring and
	// the Commands, ensuring a read from one advance the read pointer for the other.
	// see https://github.com/cosmos/cosmos-sdk/issues/9566.
	ctx.Input = bufio.NewReader(r)
	return ctx
}

// WithCodec returns a copy of the Context with an updated Codec.
func (ctx Context) WithCodec(m codec.Codec) Context {
	ctx.Codec = m
	return ctx
}

// WithLegacyAmino returns a copy of the context with an updated LegacyAmino codec.
// TODO: Deprecated (remove).
func (ctx Context) WithLegacyAmino(cdc *codec.LegacyAmino) Context {
	ctx.LegacyAmino = cdc
	return ctx
}

// WithOutput returns a copy of the context with an updated output writer (e.g. stdout).
func (ctx Context) WithOutput(w io.Writer) Context {
	ctx.Output = w
	return ctx
}

// WithFrom returns a copy of the context with an updated from address or name.
func (ctx Context) WithFrom(from string) Context {
	ctx.From = from
	return ctx
}

// WithOutputFormat returns a copy of the context with an updated OutputFormat field.
func (ctx Context) WithOutputFormat(format string) Context {
	ctx.OutputFormat = format
	return ctx
}

// WithNodeURI returns a copy of the context with an updated node URI.
func (ctx Context) WithNodeURI(nodeURI string) Context {
	ctx.NodeURI = nodeURI
	return ctx
}

// WithHeight returns a copy of the context with an updated height.
func (ctx Context) WithHeight(height int64) Context {
	ctx.Height = height
	return ctx
}

// WithClient returns a copy of the context with an updated RPC client
// instance.
func (ctx Context) WithClient(client CometRPC) Context {
	ctx.Client = client
	return ctx
}

// WithGRPCClient returns a copy of the context with an updated GRPC client
// instance.
func (ctx Context) WithGRPCClient(grpcClient *grpc.ClientConn) Context {
	ctx.GRPCClient = grpcClient
	return ctx
}

// WithGRPCConnProvider returns a copy of the context with an updated GRPCConnProvider.
func (ctx Context) WithGRPCConnProvider(provider *GRPCConnProvider) Context {
	ctx.GRPCConnProvider = provider
	return ctx
}

// GetGRPCConn returns the appropriate gRPC connection for the given height.
// If GRPCConnProvider is set, it uses it to determine the connection.
// Otherwise, it falls back to the default GRPCClient.
func (ctx Context) GetGRPCConn(height int64) *grpc.ClientConn {
	if ctx.GRPCConnProvider != nil {
		return ctx.GRPCConnProvider.GetGRPCConn(height)
	}
	return ctx.GRPCClient
}

// WithUseLedger returns a copy of the context with an updated UseLedger flag.
func (ctx Context) WithUseLedger(useLedger bool) Context {
	ctx.UseLedger = useLedger
	return ctx
}

// WithChainID returns a copy of the context with an updated chain ID.
func (ctx Context) WithChainID(chainID string) Context {
	ctx.ChainID = chainID
	return ctx
}

// WithHomeDir returns a copy of the Context with HomeDir set.
// An empty dir is ignored so an unset flag cannot clobber a populated value.
func (ctx Context) WithHomeDir(dir string) Context {
	if dir != "" {
		ctx.HomeDir = dir
	}
	return ctx
}

// WithKeyringDir returns a copy of the Context with KeyringDir set.
func (ctx Context) WithKeyringDir(dir string) Context {
	ctx.KeyringDir = dir
	return ctx
}

// WithKeyringDefaultKeyName returns a copy of the Context with KeyringDefaultKeyName set.
func (ctx Context) WithKeyringDefaultKeyName(keyName string) Context {
	ctx.KeyringDefaultKeyName = keyName
	return ctx
}

// WithGenerateOnly returns a copy of the context with updated GenerateOnly value
func (ctx Context) WithGenerateOnly(generateOnly bool) Context {
	ctx.GenerateOnly = generateOnly
	return ctx
}

// WithSimulation returns a copy of the context with updated Simulate value
func (ctx Context) WithSimulation(simulate bool) Context {
	ctx.Simulate = simulate
	return ctx
}

// WithOffline returns a copy of the context with updated Offline value.
func (ctx Context) WithOffline(offline bool) Context {
	ctx.Offline = offline
	return ctx
}

// WithFromName returns a copy of the context with an updated from account name.
func (ctx Context) WithFromName(name string) Context {
	ctx.FromName = name
	return ctx
}

// WithFromAddress returns a copy of the context with an updated from account
// address.
func (ctx Context) WithFromAddress(addr sdk.AccAddress) Context {
	ctx.FromAddress = addr
	return ctx
}

// WithFeePayerAddress returns a copy of the context with an updated fee payer account
// address.
func (ctx Context) WithFeePayerAddress(addr sdk.AccAddress) Context {
	ctx.FeePayer = addr
	return ctx
}

// WithFeeGranterAddress returns a copy of the context with an updated fee granter account
// address.
func (ctx Context) WithFeeGranterAddress(addr sdk.AccAddress) Context {
	ctx.FeeGranter = addr
	return ctx
}

// WithBroadcastMode returns a copy of the context with an updated broadcast
// mode.
func (ctx Context) WithBroadcastMode(mode string) Context {
	ctx.BroadcastMode = mode
	return ctx
}

// WithSignModeStr returns a copy of the context with an updated SignMode
// value.
func (ctx Context) WithSignModeStr(signModeStr string) Context {
	ctx.SignModeStr = signModeStr
	return ctx
}

// WithSkipConfirmation returns a copy of the context with an updated SkipConfirm
// value.
func (ctx Context) WithSkipConfirmation(skip bool) Context {
	ctx.SkipConfirm = skip
	return ctx
}

// WithTxConfig returns the context with an updated TxConfig
func (ctx Context) WithTxConfig(generator TxConfig) Context {
	ctx.TxConfig = generator
	return ctx
}

// WithAccountRetriever returns the context with an updated AccountRetriever
func (ctx Context) WithAccountRetriever(retriever AccountRetriever) Context {
	ctx.AccountRetriever = retriever
	return ctx
}

// WithInterfaceRegistry returns the context with an updated InterfaceRegistry
func (ctx Context) WithInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) Context {
	ctx.InterfaceRegistry = interfaceRegistry
	return ctx
}

// WithViper returns the context with Viper field. This Viper instance is used to read
// client-side config from the config file.
func (ctx Context) WithViper(prefix string) Context {
	v := viper.New()

	// Default the env prefix to the executable's base name when none is given.
	if prefix == "" {
		executableName, _ := os.Executable()
		prefix = path.Base(executableName)
	}

	v.SetEnvPrefix(prefix)
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_"))
	v.AutomaticEnv()
	ctx.Viper = v
	return ctx
}

// WithAux returns a copy of the context with an updated IsAux value.
func (ctx Context) WithAux(isAux bool) Context {
	ctx.IsAux = isAux
	return ctx
}

// WithLedgerHasProto returns the context with the provided boolean value, indicating
// whether the target Ledger application can support Protobuf payloads.
func (ctx Context) WithLedgerHasProtobuf(val bool) Context {
	ctx.LedgerHasProtobuf = val
	return ctx
}

// WithPreprocessTxHook returns the context with the provided preprocessing hook, which
// enables chains to preprocess the transaction using the builder.
func (ctx Context) WithPreprocessTxHook(preprocessFn PreprocessTxFn) Context {
	ctx.PreprocessTxHook = preprocessFn
	return ctx
}

// PrintString prints the raw string to ctx.Output if it's defined, otherwise to os.Stdout
func (ctx Context) PrintString(str string) error {
	return ctx.PrintBytes([]byte(str))
}

// PrintBytes prints the raw bytes to ctx.Output if it's defined, otherwise to os.Stdout.
// NOTE: for printing a complex state object, you should use ctx.PrintOutput
func (ctx Context) PrintBytes(o []byte) error {
	writer := ctx.Output
	if writer == nil {
		writer = os.Stdout
	}

	_, err := writer.Write(o)
	return err
}

// PrintProto outputs toPrint to the ctx.Output based on ctx.OutputFormat which is
// either text or json. If text, toPrint will be YAML encoded. Otherwise, toPrint
// will be JSON encoded using ctx.Codec. An error is returned upon failure.
func (ctx Context) PrintProto(toPrint proto.Message) error {
	// always serialize JSON initially because proto json can't be directly YAML encoded
	out, err := ctx.Codec.MarshalJSON(toPrint)
	if err != nil {
		return err
	}
	return ctx.printOutput(out)
}

// PrintObjectLegacy is a variant of PrintProto that doesn't require a proto.Message type
// and uses amino JSON encoding.
//
// Deprecated: It will be removed in the near future!
func (ctx Context) PrintObjectLegacy(toPrint any) error {
	out, err := ctx.LegacyAmino.MarshalJSON(toPrint)
	if err != nil {
		return err
	}
	return ctx.printOutput(out)
}

// PrintRaw is a variant of PrintProto that doesn't require a proto.Message type
// and uses a raw JSON message. No marshaling is performed.
func (ctx Context) PrintRaw(toPrint json.RawMessage) error {
	return ctx.printOutput(toPrint)
}

// printOutput writes out to ctx.Output (or os.Stdout), converting the JSON
// payload to YAML when OutputFormat is "text".
func (ctx Context) printOutput(out []byte) error {
	var err error
	if ctx.OutputFormat == "text" {
		out, err = yaml.JSONToYAML(out)
		if err != nil {
			return err
		}
	}

	writer := ctx.Output
	if writer == nil {
		writer = os.Stdout
	}

	_, err = writer.Write(out)
	if err != nil {
		return err
	}

	if ctx.OutputFormat != "text" {
		// append new-line for formats besides YAML
		_, err = writer.Write([]byte("\n"))
		if err != nil {
			return err
		}
	}

	return nil
}

// GetFromFields returns a from account address, account name and keyring type, given either an address or key name.
// If clientCtx.Simulate is true the keystore is not accessed and a valid address must be provided
// If clientCtx.GenerateOnly is true the keystore is only accessed if a key name is provided
// If from is empty, the default key if specified in the context will be used
func GetFromFields(clientCtx Context, kr keyring.Keyring, from string) (sdk.AccAddress, string, keyring.KeyType, error) {
	if from == "" && clientCtx.KeyringDefaultKeyName != "" {
		from = clientCtx.KeyringDefaultKeyName
		_ = clientCtx.PrintString(fmt.Sprintf("No key name or address provided; using the default key: %s\n", clientCtx.KeyringDefaultKeyName))
	}

	if from == "" {
		return nil, "", 0, nil
	}

	addr, err := sdk.AccAddressFromBech32(from)
	switch {
	case clientCtx.Simulate:
		if err != nil {
			return nil, "", 0, fmt.Errorf("a valid bech32 address must be provided in simulation mode: %w", err)
		}

		return addr, "", 0, nil

	case clientCtx.GenerateOnly:
		// generate-only with a bare address: skip the keyring lookup entirely.
		if err == nil {
			return addr, "", 0, nil
		}
	}

	// Resolve through the keyring: by address when `from` parsed as bech32,
	// otherwise by key name.
	var k *keyring.Record
	if err == nil {
		k, err = kr.KeyByAddress(addr)
		if err != nil {
			return nil, "", 0, err
		}
	} else {
		k, err = kr.Key(from)
		if err != nil {
			return nil, "", 0, err
		}
	}

	addr, err = k.GetAddress()
	if err != nil {
		return nil, "", 0, err
	}

	return addr, k.Name, k.GetType(), nil
}

// NewKeyringFromBackend gets a Keyring object from a backend
func NewKeyringFromBackend(ctx Context, backend string) (keyring.Keyring, error) {
	// Simulation never needs persistent keys; force the in-memory backend.
	if ctx.Simulate {
		backend = keyring.BackendMemory
	}

	return keyring.New(sdk.KeyringServiceName(), backend, ctx.KeyringDir, ctx.Input, ctx.Codec, ctx.KeyringOptions...)
}
diff --git a/client/context_test.go b/client/context_test.go
new file mode 100644
index 0000000..b3ee879
--- /dev/null
+++ b/client/context_test.go
@@ -0,0 +1,248 @@
package client_test

import (
	"bytes"
	"encoding/json"
	"os"
	"strings"
	"testing"

	"github.com/spf13/viper"
	"github.com/stretchr/testify/require"

	"github.com/cosmos/cosmos-sdk/client"
	"github.com/cosmos/cosmos-sdk/client/flags"
	"github.com/cosmos/cosmos-sdk/codec"
	"github.com/cosmos/cosmos-sdk/codec/types"
	"github.com/cosmos/cosmos-sdk/crypto/hd"
	"github.com/cosmos/cosmos-sdk/crypto/keyring"
	"github.com/cosmos/cosmos-sdk/testutil/testdata"
	"github.com/cosmos/cosmos-sdk/types/module/testutil"
)

func TestMain(m *testing.M) {
	viper.Set(flags.FlagKeyringBackend, keyring.BackendMemory)
	os.Exit(m.Run())
}

// TestContext_PrintProto checks PrintProto's JSON and YAML renderings.
func TestContext_PrintProto(t *testing.T) {
	ctx := client.Context{}

	animal := &testdata.Dog{
		Size_: "big",
		Name:  "Spot",
	}
	anyAnimal, err := types.NewAnyWithValue(animal)
	require.NoError(t, err)
	hasAnimal := &testdata.HasAnimal{
		Animal: anyAnimal,
		X:      10,
	}

	// proto
	registry := testdata.NewTestInterfaceRegistry()
	ctx = ctx.WithCodec(codec.NewProtoCodec(registry))

	// json
	buf := &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatJSON
	err = ctx.PrintProto(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`{"animal":{"@type":"/testpb.Dog","size":"big","name":"Spot"},"x":"10"}
`, buf.String())

	// yaml
	buf = &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatText
	err =
		ctx.PrintProto(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`animal:
  '@type': /testpb.Dog
  name: Spot
  size: big
x: "10"
`, buf.String())
}

// TestContext_PrintObjectLegacy checks the deprecated amino-JSON print path.
func TestContext_PrintObjectLegacy(t *testing.T) {
	ctx := client.Context{}

	animal := &testdata.Dog{
		Size_: "big",
		Name:  "Spot",
	}
	anyAnimal, err := types.NewAnyWithValue(animal)
	require.NoError(t, err)
	hasAnimal := &testdata.HasAnimal{
		Animal: anyAnimal,
		X:      10,
	}

	// amino
	amino := testdata.NewTestAmino()
	ctx = ctx.WithLegacyAmino(&codec.LegacyAmino{Amino: amino})

	// json
	buf := &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatJSON
	err = ctx.PrintObjectLegacy(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`{"type":"testpb/HasAnimal","value":{"animal":{"type":"testpb/Dog","value":{"size":"big","name":"Spot"}},"x":"10"}}
`, buf.String())

	// yaml
	buf = &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatText
	err = ctx.PrintObjectLegacy(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`type: testpb/HasAnimal
value:
  animal:
    type: testpb/Dog
    value:
      name: Spot
      size: big
  x: "10"
`, buf.String())
}

// TestContext_PrintRaw checks raw JSON passthrough and its YAML conversion.
func TestContext_PrintRaw(t *testing.T) {
	ctx := client.Context{}
	hasAnimal := json.RawMessage(`{"animal":{"@type":"/testpb.Dog","size":"big","name":"Spot"},"x":"10"}`)

	// json
	buf := &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatJSON
	err := ctx.PrintRaw(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`{"animal":{"@type":"/testpb.Dog","size":"big","name":"Spot"},"x":"10"}
`, buf.String())

	// yaml
	buf = &bytes.Buffer{}
	ctx = ctx.WithOutput(buf)
	ctx.OutputFormat = flags.OutputFormatText
	err = ctx.PrintRaw(hasAnimal)
	require.NoError(t, err)
	require.Equal(t,
		`animal:
  '@type': /testpb.Dog
  name: Spot
  size: big
x: "10"
`, buf.String())
}

// TestGetFromFields covers address/key-name resolution across keyring
// backends, simulation, and generate-only modes.
func TestGetFromFields(t *testing.T) {
	cfg :=
testutil.MakeTestEncodingConfig() + path := hd.CreateHDPath(118, 0, 0).String() + + testCases := []struct { + clientCtx client.Context + keyring func() keyring.Keyring + from string + expectedErr string + }{ + { + keyring: func() keyring.Keyring { + kb := keyring.NewInMemory(cfg.Codec) + + _, _, err := kb.NewMnemonic("alice", keyring.English, path, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + require.NoError(t, err) + + return kb + }, + from: "alice", + }, + { + keyring: func() keyring.Keyring { + kb, err := keyring.New(t.Name(), keyring.BackendTest, t.TempDir(), nil, cfg.Codec) + require.NoError(t, err) + + _, _, err = kb.NewMnemonic("alice", keyring.English, path, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + require.NoError(t, err) + + return kb + }, + from: "alice", + }, + { + keyring: func() keyring.Keyring { + return keyring.NewInMemory(cfg.Codec) + }, + from: "cosmos139f7kncmglres2nf3h4hc4tade85ekfr8sulz5", + expectedErr: "key with address cosmos139f7kncmglres2nf3h4hc4tade85ekfr8sulz5 not found: key not found", + }, + { + keyring: func() keyring.Keyring { + kb, err := keyring.New(t.Name(), keyring.BackendTest, t.TempDir(), nil, cfg.Codec) + require.NoError(t, err) + return kb + }, + from: "alice", + expectedErr: "alice.info: key not found", + }, + { + keyring: func() keyring.Keyring { + return keyring.NewInMemory(cfg.Codec) + }, + from: "cosmos139f7kncmglres2nf3h4hc4tade85ekfr8sulz5", + clientCtx: client.Context{}.WithSimulation(true), + }, + { + keyring: func() keyring.Keyring { + return keyring.NewInMemory(cfg.Codec) + }, + from: "alice", + clientCtx: client.Context{}.WithSimulation(true), + expectedErr: "a valid bech32 address must be provided in simulation mode", + }, + { + keyring: func() keyring.Keyring { + return keyring.NewInMemory(cfg.Codec) + }, + from: "cosmos139f7kncmglres2nf3h4hc4tade85ekfr8sulz5", + clientCtx: client.Context{}.WithGenerateOnly(true), + }, + { + keyring: func() keyring.Keyring { + return keyring.NewInMemory(cfg.Codec) + }, 
+ from: "alice", + clientCtx: client.Context{}.WithGenerateOnly(true), + expectedErr: "alice.info: key not found", + }, + { + keyring: func() keyring.Keyring { + kb, err := keyring.New(t.Name(), keyring.BackendTest, t.TempDir(), nil, cfg.Codec) + require.NoError(t, err) + + _, _, err = kb.NewMnemonic("alice", keyring.English, path, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + require.NoError(t, err) + + return kb + }, + clientCtx: client.Context{}.WithGenerateOnly(true), + from: "alice", + }, + } + + for _, tc := range testCases { + _, _, _, err := client.GetFromFields(tc.clientCtx, tc.keyring(), tc.from) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.True(t, strings.HasPrefix(err.Error(), tc.expectedErr)) + } + } +} diff --git a/client/debug/main.go b/client/debug/main.go new file mode 100644 index 0000000..7fdf411 --- /dev/null +++ b/client/debug/main.go @@ -0,0 +1,339 @@ +package debug + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/bech32/legacybech32" //nolint:staticcheck // retain for debug purposes + "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/version" +) + +var ( + flagPubkeyType = "type" + ed = "ed25519" +) + +// Cmd creates a main CLI command +func Cmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Short: "Tool for helping with debugging your application", + RunE: client.ValidateCmd, + } + + cmd.AddCommand(CodecCmd()) + cmd.AddCommand(PubkeyCmd()) + cmd.AddCommand(PubkeyRawCmd()) + cmd.AddCommand(AddrCmd()) + cmd.AddCommand(RawBytesCmd()) + cmd.AddCommand(PrefixesCmd()) + + return cmd +} + +// 
CodecCmd creates and returns a new codec debug cmd. +func CodecCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "codec", + Short: "Tool for helping with debugging your application codec", + RunE: client.ValidateCmd, + } + + cmd.AddCommand(getCodecInterfaces()) + cmd.AddCommand(getCodecInterfaceImpls()) + + return cmd +} + +// getCodecInterfaces creates and returns a new cmd used for listing all registered interfaces on the application codec. +func getCodecInterfaces() *cobra.Command { + return &cobra.Command{ + Use: "list-interfaces", + Short: "List all registered interface type URLs", + Long: "List all registered interface type URLs using the application codec", + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + iFaces := clientCtx.Codec.InterfaceRegistry().ListAllInterfaces() + for _, iFace := range iFaces { + cmd.Println(iFace) + } + return nil + }, + } +} + +// getCodecInterfaceImpls creates and returns a new cmd used for listing all registered implemenations of a given interface on the application codec. +func getCodecInterfaceImpls() *cobra.Command { + return &cobra.Command{ + Use: "list-implementations [interface]", + Short: "List the registered type URLs for the provided interface", + Long: "List the registered type URLs that can be used for the provided interface name using the application codec", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + impls := clientCtx.Codec.InterfaceRegistry().ListImplementations(args[0]) + for _, imp := range impls { + cmd.Println(imp) + } + return nil + }, + } +} + +// getPubKeyFromString decodes SDK PubKey using JSON marshaler. 
+func getPubKeyFromString(ctx client.Context, pkstr string) (cryptotypes.PubKey, error) { + var pk cryptotypes.PubKey + err := ctx.Codec.UnmarshalInterfaceJSON([]byte(pkstr), &pk) + return pk, err +} + +func PubkeyCmd() *cobra.Command { + return &cobra.Command{ + Use: "pubkey [pubkey]", + Short: "Decode a pubkey from proto JSON", + Long: fmt.Sprintf(`Decode a pubkey from proto JSON and display it's address. + +Example: +$ %s debug pubkey '{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AurroA7jvfPd1AadmmOvWM2rJSwipXfRf8yD6pLbA2DJ"}' + `, version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + pk, err := getPubKeyFromString(clientCtx, args[0]) + if err != nil { + return err + } + cmd.Println("Address:", pk.Address()) + cmd.Println("PubKey Hex:", hex.EncodeToString(pk.Bytes())) + return nil + }, + } +} + +func bytesToPubkey(bz []byte, keytype string) (cryptotypes.PubKey, bool) { + if keytype == ed { + if len(bz) == ed25519.PubKeySize { + return &ed25519.PubKey{Key: bz}, true + } + } + + if len(bz) == secp256k1.PubKeySize { + return &secp256k1.PubKey{Key: bz}, true + } + return nil, false +} + +// getPubKeyFromRawString returns a PubKey (PubKeyEd25519 or PubKeySecp256k1) by attempting +// to decode the pubkey string from hex, base64, and finally bech32. If all +// encodings fail, an error is returned. 
+func getPubKeyFromRawString(pkstr, keytype string) (cryptotypes.PubKey, error) { + // Try hex decoding + bz, err := hex.DecodeString(pkstr) + if err == nil { + pk, ok := bytesToPubkey(bz, keytype) + if ok { + return pk, nil + } + } + + bz, err = base64.StdEncoding.DecodeString(pkstr) + if err == nil { + pk, ok := bytesToPubkey(bz, keytype) + if ok { + return pk, nil + } + } + + pk, err := legacybech32.UnmarshalPubKey(legacybech32.AccPK, pkstr) + if err == nil { + return pk, nil + } + + pk, err = legacybech32.UnmarshalPubKey(legacybech32.ValPK, pkstr) + if err == nil { + return pk, nil + } + + pk, err = legacybech32.UnmarshalPubKey(legacybech32.ConsPK, pkstr) + if err == nil { + return pk, nil + } + + return nil, fmt.Errorf("pubkey '%s' invalid; expected hex, base64, or bech32 of correct size", pkstr) +} + +func PubkeyRawCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "pubkey-raw [pubkey] -t [{ed25519, secp256k1}]", + Short: "Decode a ED25519 or secp256k1 pubkey from hex, base64, or bech32", + Long: "Decode a pubkey from hex, base64, or bech32.", + Example: fmt.Sprintf(` +%s debug pubkey-raw 8FCA9D6D1F80947FD5E9A05309259746F5F72541121766D5F921339DD061174A +%s debug pubkey-raw j8qdbR+AlH/V6aBTCSWXRvX3JUESF2bV+SEzndBhF0o= +%s debug pubkey-raw cosmospub1zcjduepq3l9f6mglsz28l40f5pfsjfvhgm6lwf2pzgtkd40eyyeem5rpza9q47axrz + `, version.AppName, version.AppName, version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + pubkeyType, err := cmd.Flags().GetString(flagPubkeyType) + if err != nil { + return err + } + pubkeyType = strings.ToLower(pubkeyType) + if pubkeyType != "secp256k1" && pubkeyType != ed { + return errorsmod.Wrapf(errors.ErrInvalidType, "invalid pubkey type, expected oneof ed25519 or secp256k1") + } + + pk, err := getPubKeyFromRawString(args[0], pubkeyType) + if err != nil { + return err + } + + var consensusPub string + edPK, ok := pk.(*ed25519.PubKey) + 
if ok && pubkeyType == ed { + consensusPub, err = legacybech32.MarshalPubKey(legacybech32.ConsPK, edPK) + if err != nil { + return err + } + + cmd.Printf("Hex: %X\n", edPK.Key) + } + cmd.Println("Parsed key as", pk.Type()) + + pubKeyJSONBytes, err := clientCtx.LegacyAmino.MarshalJSON(pk) + if err != nil { + return err + } + accPub, err := legacybech32.MarshalPubKey(legacybech32.AccPK, pk) + if err != nil { + return err + } + valPub, err := legacybech32.MarshalPubKey(legacybech32.ValPK, pk) + if err != nil { + return err + } + cmd.Println("Address:", pk.Address()) + cmd.Println("JSON (base64):", string(pubKeyJSONBytes)) + cmd.Println("Bech32 Acc:", accPub) + cmd.Println("Bech32 Validator Operator:", valPub) + if pubkeyType == "ed25519" { + cmd.Println("Bech32 Validator Consensus:", consensusPub) + } + + return nil + }, + } + cmd.Flags().StringP(flagPubkeyType, "t", ed, "Pubkey type to decode (oneof secp256k1, ed25519)") + return cmd +} + +func AddrCmd() *cobra.Command { + return &cobra.Command{ + Use: "addr [address]", + Short: "Convert an address between hex and bech32", + Long: fmt.Sprintf(`Convert an address between hex encoding and bech32. 
+ +Example: +$ %s debug addr cosmos1e0jnq2sun3dzjh8p2xq95kk0expwmd7shwjpfg + `, version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + addrString := args[0] + // try hex, then bech32 + var ( + addr []byte + err error + ) + decodeFns := []func(text string) ([]byte, error){ + hex.DecodeString, + func(text string) ([]byte, error) { return sdk.AccAddressFromBech32(text) }, + func(text string) ([]byte, error) { return sdk.ValAddressFromBech32(text) }, + func(text string) ([]byte, error) { return sdk.ConsAddressFromBech32(text) }, + } + errs := make([]any, 0, len(decodeFns)) + for _, fn := range decodeFns { + if addr, err = fn(addrString); err == nil { + break + } + errs = append(errs, err) + } + if len(errs) == len(decodeFns) { + errTags := []string{ + "hex", "bech32 acc", "bech32 val", "bech32 con", + } + format := "" + for i := range errs { + if format != "" { + format += ", " + } + format += errTags[i] + ": %w" + } + return fmt.Errorf("expected hex or bech32. Got errors: "+format, errs...) + } + + cmd.Println("Address:", addr) + cmd.Printf("Address (hex): %X\n", addr) + cmd.Printf("Bech32 Acc: %s\n", sdk.AccAddress(addr)) + cmd.Printf("Bech32 Val: %s\n", sdk.ValAddress(addr)) + cmd.Printf("Bech32 Con: %s\n", sdk.ConsAddress(addr)) + return nil + }, + } +} + +func RawBytesCmd() *cobra.Command { + return &cobra.Command{ + Use: "raw-bytes ", + Short: "Convert raw bytes output (eg. 
[10 21 13 255]) to hex", + Long: "Convert raw-bytes to hex.", + Example: fmt.Sprintf("%s debug raw-bytes '[72 101 108 108 111 44 32 112 108 97 121 103 114 111 117 110 100]'", version.AppName), + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + stringBytes := args[0] + stringBytes = strings.Trim(stringBytes, "[") + stringBytes = strings.Trim(stringBytes, "]") + spl := strings.Split(stringBytes, " ") + + byteArray := []byte{} + for _, s := range spl { + b, err := strconv.ParseInt(s, 10, 8) + if err != nil { + return err + } + byteArray = append(byteArray, byte(b)) + } + fmt.Printf("%X\n", byteArray) + return nil + }, + } +} + +func PrefixesCmd() *cobra.Command { + return &cobra.Command{ + Use: "prefixes", + Short: "List prefixes used for Human-Readable Part (HRP) in Bech32", + Long: "List prefixes used in Bech32 addresses.", + Example: fmt.Sprintf("$ %s debug prefixes", version.AppName), + RunE: func(cmd *cobra.Command, args []string) error { + cmd.Printf("Bech32 Acc: %s\n", sdk.GetConfig().GetBech32AccountAddrPrefix()) + cmd.Printf("Bech32 Val: %s\n", sdk.GetConfig().GetBech32ValidatorAddrPrefix()) + cmd.Printf("Bech32 Con: %s\n", sdk.GetConfig().GetBech32ConsensusAddrPrefix()) + return nil + }, + } +} diff --git a/client/docs/config.json b/client/docs/config.json new file mode 100644 index 0000000..e43504c --- /dev/null +++ b/client/docs/config.json @@ -0,0 +1,179 @@ +{ + "swagger": "2.0", + "info": { + "title": "Cosmos SDK - gRPC Gateway docs", + "description": "A REST interface for state queries.", + "version": "1.0.0" + }, + "apis": [ + { + "url": "./tmp-swagger-gen/cosmos/auth/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "AuthParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/bank/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "BankParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/base/tendermint/v1beta1/query.swagger.json", + "operationIds": { + 
"rename": { + "Params": "BaseParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/base/node/v1beta1/query.swagger.json" + }, + { + "url": "./tmp-swagger-gen/cosmos/distribution/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "DistributionParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/evidence/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "EvidenceParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/gov/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "GovParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/gov/v1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "GovV1Params", + "Proposal": "GovV1Proposal", + "Proposals": "GovV1Proposal", + "Vote": "GovV1Vote", + "Votes": "GovV1Votes", + "Deposit": "GovV1Deposit", + "Deposits": "GovV1Deposit", + "TallyResult": "GovV1TallyResult" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/mint/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "MintParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/params/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "Params" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/slashing/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "SlashingParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/staking/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "StakingParams", + "DelegatorValidators": "StakingDelegatorValidators" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/tx/v1beta1/service.swagger.json", + "dereference": { + "circular": "ignore" + } + }, + { + "url": "./tmp-swagger-gen/cosmos/upgrade/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "UpgradeParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/authz/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "AuthzParams" + } + } 
+ }, + { + "url": "./tmp-swagger-gen/cosmos/feegrant/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "FeegrantParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/nft/v1beta1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "NftParams", + "Balance": "NftBalance" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/group/v1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "GroupParams", + "Proposal": "GroupProposal", + "TallyResult": "GroupTallyResult" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/circuit/v1/query.swagger.json", + "operationIds": { + "rename": { + "Accounts": "CircuitAccounts", + "Account": "CircuitAccount", + "Params": "CircuitParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/consensus/v1/query.swagger.json", + "operationIds": { + "rename": { + "Params": "ConsensusParams" + } + } + }, + { + "url": "./tmp-swagger-gen/cosmos/app/v1alpha1/query.swagger.json" + } + ] +} \ No newline at end of file diff --git a/client/docs/embed.go b/client/docs/embed.go new file mode 100644 index 0000000..25ad904 --- /dev/null +++ b/client/docs/embed.go @@ -0,0 +1,6 @@ +package docs + +import "embed" + +//go:embed swagger-ui +var SwaggerUI embed.FS diff --git a/client/docs/swagger-ui/favicon-16x16.png b/client/docs/swagger-ui/favicon-16x16.png new file mode 100644 index 0000000..09e1928 Binary files /dev/null and b/client/docs/swagger-ui/favicon-16x16.png differ diff --git a/client/docs/swagger-ui/favicon-32x32.png b/client/docs/swagger-ui/favicon-32x32.png new file mode 100644 index 0000000..34e97f9 Binary files /dev/null and b/client/docs/swagger-ui/favicon-32x32.png differ diff --git a/client/docs/swagger-ui/index.html b/client/docs/swagger-ui/index.html new file mode 100644 index 0000000..4c16e69 --- /dev/null +++ b/client/docs/swagger-ui/index.html @@ -0,0 +1,61 @@ + + + + + + Swagger UI + + + + + + + +
+ + + + + + diff --git a/client/docs/swagger-ui/oauth2-redirect.html b/client/docs/swagger-ui/oauth2-redirect.html new file mode 100644 index 0000000..87b45a0 --- /dev/null +++ b/client/docs/swagger-ui/oauth2-redirect.html @@ -0,0 +1,75 @@ + + + + Swagger UI: OAuth2 Redirect + + + + + diff --git a/client/docs/swagger-ui/swagger-ui-bundle.js b/client/docs/swagger-ui/swagger-ui-bundle.js new file mode 100644 index 0000000..1706bcf --- /dev/null +++ b/client/docs/swagger-ui/swagger-ui-bundle.js @@ -0,0 +1,3 @@ +/*! For license information please see swagger-ui-bundle.js.LICENSE.txt */ +!function(e,t){"object"==typeof exports&&"object"==typeof module?module.exports=t():"function"==typeof define&&define.amd?define([],t):"object"==typeof exports?exports.SwaggerUIBundle=t():e.SwaggerUIBundle=t()}(this,(function(){return function(e){var t={};function n(r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}return n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="/dist",n(n.s=479)}([function(e,t,n){"use strict";e.exports=n(560)},function(e,t,n){e.exports=function(){"use strict";var e=Array.prototype.slice;function t(e,t){t&&(e.prototype=Object.create(t.prototype)),e.prototype.constructor=e}function n(e){return 
i(e)?e:J(e)}function r(e){return u(e)?e:K(e)}function o(e){return s(e)?e:Y(e)}function a(e){return i(e)&&!c(e)?e:G(e)}function i(e){return!(!e||!e[f])}function u(e){return!(!e||!e[p])}function s(e){return!(!e||!e[h])}function c(e){return u(e)||s(e)}function l(e){return!(!e||!e[d])}t(r,n),t(o,n),t(a,n),n.isIterable=i,n.isKeyed=u,n.isIndexed=s,n.isAssociative=c,n.isOrdered=l,n.Keyed=r,n.Indexed=o,n.Set=a;var f="@@__IMMUTABLE_ITERABLE__@@",p="@@__IMMUTABLE_KEYED__@@",h="@@__IMMUTABLE_INDEXED__@@",d="@@__IMMUTABLE_ORDERED__@@",m="delete",v=5,g=1<>>0;if(""+n!==t||4294967295===n)return NaN;t=n}return t<0?A(e)+t:t}function C(){return!0}function j(e,t,n){return(0===e||void 0!==n&&e<=-n)&&(void 0===t||void 0!==n&&t>=n)}function T(e,t){return P(e,t,0)}function I(e,t){return P(e,t,t)}function P(e,t,n){return void 0===e?n:e<0?Math.max(0,t+e):void 0===t?e:Math.min(t,e)}var N=0,M=1,R=2,D="function"==typeof Symbol&&Symbol.iterator,L="@@iterator",B=D||L;function F(e){this.next=e}function z(e,t,n,r){var o=0===e?t:1===e?n:[t,n];return r?r.value=o:r={value:o,done:!1},r}function U(){return{value:void 0,done:!0}}function q(e){return!!H(e)}function V(e){return e&&"function"==typeof e.next}function W(e){var t=H(e);return t&&t.call(e)}function H(e){var t=e&&(D&&e[D]||e[L]);if("function"==typeof t)return t}function $(e){return e&&"number"==typeof e.length}function J(e){return null==e?ie():i(e)?e.toSeq():ce(e)}function K(e){return null==e?ie().toKeyedSeq():i(e)?u(e)?e.toSeq():e.fromEntrySeq():ue(e)}function Y(e){return null==e?ie():i(e)?u(e)?e.entrySeq():e.toIndexedSeq():se(e)}function G(e){return(null==e?ie():i(e)?u(e)?e.entrySeq():e:se(e)).toSetSeq()}F.prototype.toString=function(){return"[Iterator]"},F.KEYS=N,F.VALUES=M,F.ENTRIES=R,F.prototype.inspect=F.prototype.toSource=function(){return this.toString()},F.prototype[B]=function(){return this},t(J,n),J.of=function(){return J(arguments)},J.prototype.toSeq=function(){return this},J.prototype.toString=function(){return this.__toString("Seq 
{","}")},J.prototype.cacheResult=function(){return!this._cache&&this.__iterateUncached&&(this._cache=this.entrySeq().toArray(),this.size=this._cache.length),this},J.prototype.__iterate=function(e,t){return fe(this,e,t,!0)},J.prototype.__iterator=function(e,t){return pe(this,e,t,!0)},t(K,J),K.prototype.toKeyedSeq=function(){return this},t(Y,J),Y.of=function(){return Y(arguments)},Y.prototype.toIndexedSeq=function(){return this},Y.prototype.toString=function(){return this.__toString("Seq [","]")},Y.prototype.__iterate=function(e,t){return fe(this,e,t,!1)},Y.prototype.__iterator=function(e,t){return pe(this,e,t,!1)},t(G,J),G.of=function(){return G(arguments)},G.prototype.toSetSeq=function(){return this},J.isSeq=ae,J.Keyed=K,J.Set=G,J.Indexed=Y;var Q,Z,X,ee="@@__IMMUTABLE_SEQ__@@";function te(e){this._array=e,this.size=e.length}function ne(e){var t=Object.keys(e);this._object=e,this._keys=t,this.size=t.length}function re(e){this._iterable=e,this.size=e.length||e.size}function oe(e){this._iterator=e,this._iteratorCache=[]}function ae(e){return!(!e||!e[ee])}function ie(){return Q||(Q=new te([]))}function ue(e){var t=Array.isArray(e)?new te(e).fromEntrySeq():V(e)?new oe(e).fromEntrySeq():q(e)?new re(e).fromEntrySeq():"object"==typeof e?new ne(e):void 0;if(!t)throw new TypeError("Expected Array or iterable object of [k, v] entries, or keyed object: "+e);return t}function se(e){var t=le(e);if(!t)throw new TypeError("Expected Array or iterable object of values: "+e);return t}function ce(e){var t=le(e)||"object"==typeof e&&new ne(e);if(!t)throw new TypeError("Expected Array or iterable object of values, or keyed object: "+e);return t}function le(e){return $(e)?new te(e):V(e)?new oe(e):q(e)?new re(e):void 0}function fe(e,t,n,r){var o=e._cache;if(o){for(var a=o.length-1,i=0;i<=a;i++){var u=o[n?a-i:i];if(!1===t(u[1],r?u[0]:i,e))return i+1}return i}return e.__iterateUncached(t,n)}function pe(e,t,n,r){var o=e._cache;if(o){var a=o.length-1,i=0;return new F((function(){var 
e=o[n?a-i:i];return i++>a?U():z(t,r?e[0]:i-1,e[1])}))}return e.__iteratorUncached(t,n)}function he(e,t){return t?de(t,e,"",{"":e}):me(e)}function de(e,t,n,r){return Array.isArray(t)?e.call(r,n,Y(t).map((function(n,r){return de(e,n,r,t)}))):ve(t)?e.call(r,n,K(t).map((function(n,r){return de(e,n,r,t)}))):t}function me(e){return Array.isArray(e)?Y(e).map(me).toList():ve(e)?K(e).map(me).toMap():e}function ve(e){return e&&(e.constructor===Object||void 0===e.constructor)}function ge(e,t){if(e===t||e!=e&&t!=t)return!0;if(!e||!t)return!1;if("function"==typeof e.valueOf&&"function"==typeof t.valueOf){if((e=e.valueOf())===(t=t.valueOf())||e!=e&&t!=t)return!0;if(!e||!t)return!1}return!("function"!=typeof e.equals||"function"!=typeof t.equals||!e.equals(t))}function ye(e,t){if(e===t)return!0;if(!i(t)||void 0!==e.size&&void 0!==t.size&&e.size!==t.size||void 0!==e.__hash&&void 0!==t.__hash&&e.__hash!==t.__hash||u(e)!==u(t)||s(e)!==s(t)||l(e)!==l(t))return!1;if(0===e.size&&0===t.size)return!0;var n=!c(e);if(l(e)){var r=e.entries();return t.every((function(e,t){var o=r.next().value;return o&&ge(o[1],e)&&(n||ge(o[0],t))}))&&r.next().done}var o=!1;if(void 0===e.size)if(void 0===t.size)"function"==typeof e.cacheResult&&e.cacheResult();else{o=!0;var a=e;e=t,t=a}var f=!0,p=t.__iterate((function(t,r){if(n?!e.has(t):o?!ge(t,e.get(r,b)):!ge(e.get(r,b),t))return f=!1,!1}));return f&&e.size===p}function be(e,t){if(!(this instanceof be))return new be(e,t);if(this._value=e,this.size=void 0===t?1/0:Math.max(0,t),0===this.size){if(Z)return Z;Z=this}}function we(e,t){if(!e)throw new Error(t)}function xe(e,t,n){if(!(this instanceof xe))return new xe(e,t,n);if(we(0!==n,"Cannot step a Range by 0"),e=e||0,void 0===t&&(t=1/0),n=void 0===n?1:Math.abs(n),tr?U():z(e,o,n[t?r-o++:o++])}))},t(ne,K),ne.prototype.get=function(e,t){return void 0===t||this.has(e)?this._object[e]:t},ne.prototype.has=function(e){return this._object.hasOwnProperty(e)},ne.prototype.__iterate=function(e,t){for(var 
n=this._object,r=this._keys,o=r.length-1,a=0;a<=o;a++){var i=r[t?o-a:a];if(!1===e(n[i],i,this))return a+1}return a},ne.prototype.__iterator=function(e,t){var n=this._object,r=this._keys,o=r.length-1,a=0;return new F((function(){var i=r[t?o-a:a];return a++>o?U():z(e,i,n[i])}))},ne.prototype[d]=!0,t(re,Y),re.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);var n=W(this._iterable),r=0;if(V(n))for(var o;!(o=n.next()).done&&!1!==e(o.value,r++,this););return r},re.prototype.__iteratorUncached=function(e,t){if(t)return this.cacheResult().__iterator(e,t);var n=W(this._iterable);if(!V(n))return new F(U);var r=0;return new F((function(){var t=n.next();return t.done?t:z(e,r++,t.value)}))},t(oe,Y),oe.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);for(var n,r=this._iterator,o=this._iteratorCache,a=0;a=r.length){var t=n.next();if(t.done)return t;r[o]=t.value}return z(e,o,r[o++])}))},t(be,Y),be.prototype.toString=function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},be.prototype.get=function(e,t){return this.has(e)?this._value:t},be.prototype.includes=function(e){return ge(this._value,e)},be.prototype.slice=function(e,t){var n=this.size;return j(e,t,n)?this:new be(this._value,I(t,n)-T(e,n))},be.prototype.reverse=function(){return this},be.prototype.indexOf=function(e){return ge(this._value,e)?0:-1},be.prototype.lastIndexOf=function(e){return ge(this._value,e)?this.size:-1},be.prototype.__iterate=function(e,t){for(var n=0;n=0&&t=0&&nn?U():z(e,a++,i)}))},xe.prototype.equals=function(e){return e instanceof xe?this._start===e._start&&this._end===e._end&&this._step===e._step:ye(this,e)},t(Ee,n),t(_e,Ee),t(Se,Ee),t(ke,Ee),Ee.Keyed=_e,Ee.Indexed=Se,Ee.Set=ke;var Ae="function"==typeof Math.imul&&-2===Math.imul(4294967295,2)?Math.imul:function(e,t){var n=65535&(e|=0),r=65535&(t|=0);return n*r+((e>>>16)*r+n*(t>>>16)<<16>>>0)|0};function Oe(e){return 
e>>>1&1073741824|3221225471&e}function Ce(e){if(!1===e||null==e)return 0;if("function"==typeof e.valueOf&&(!1===(e=e.valueOf())||null==e))return 0;if(!0===e)return 1;var t=typeof e;if("number"===t){if(e!=e||e===1/0)return 0;var n=0|e;for(n!==e&&(n^=4294967295*e);e>4294967295;)n^=e/=4294967295;return Oe(n)}if("string"===t)return e.length>Fe?je(e):Te(e);if("function"==typeof e.hashCode)return e.hashCode();if("object"===t)return Ie(e);if("function"==typeof e.toString)return Te(e.toString());throw new Error("Value type "+t+" cannot be hashed.")}function je(e){var t=qe[e];return void 0===t&&(t=Te(e),Ue===ze&&(Ue=0,qe={}),Ue++,qe[e]=t),t}function Te(e){for(var t=0,n=0;n0)switch(e.nodeType){case 1:return e.uniqueID;case 9:return e.documentElement&&e.documentElement.uniqueID}}var Re,De="function"==typeof WeakMap;De&&(Re=new WeakMap);var Le=0,Be="__immutablehash__";"function"==typeof Symbol&&(Be=Symbol(Be));var Fe=16,ze=255,Ue=0,qe={};function Ve(e){we(e!==1/0,"Cannot perform this action with an infinite size.")}function We(e){return null==e?ot():He(e)&&!l(e)?e:ot().withMutations((function(t){var n=r(e);Ve(n.size),n.forEach((function(e,n){return t.set(n,e)}))}))}function He(e){return!(!e||!e[Je])}t(We,_e),We.of=function(){var t=e.call(arguments,0);return ot().withMutations((function(e){for(var n=0;n=t.length)throw new Error("Missing value for key: "+t[n]);e.set(t[n],t[n+1])}}))},We.prototype.toString=function(){return this.__toString("Map {","}")},We.prototype.get=function(e,t){return this._root?this._root.get(0,void 0,e,t):t},We.prototype.set=function(e,t){return at(this,e,t)},We.prototype.setIn=function(e,t){return this.updateIn(e,b,(function(){return t}))},We.prototype.remove=function(e){return at(this,e,b)},We.prototype.deleteIn=function(e){return this.updateIn(e,(function(){return b}))},We.prototype.update=function(e,t,n){return 1===arguments.length?e(this):this.updateIn([e],t,n)},We.prototype.updateIn=function(e,t,n){n||(n=t,t=void 0);var r=vt(this,En(e),t,n);return 
r===b?void 0:r},We.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):ot()},We.prototype.merge=function(){return pt(this,void 0,arguments)},We.prototype.mergeWith=function(t){return pt(this,t,e.call(arguments,1))},We.prototype.mergeIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,ot(),(function(e){return"function"==typeof e.merge?e.merge.apply(e,n):n[n.length-1]}))},We.prototype.mergeDeep=function(){return pt(this,ht,arguments)},We.prototype.mergeDeepWith=function(t){var n=e.call(arguments,1);return pt(this,dt(t),n)},We.prototype.mergeDeepIn=function(t){var n=e.call(arguments,1);return this.updateIn(t,ot(),(function(e){return"function"==typeof e.mergeDeep?e.mergeDeep.apply(e,n):n[n.length-1]}))},We.prototype.sort=function(e){return qt(fn(this,e))},We.prototype.sortBy=function(e,t){return qt(fn(this,t,e))},We.prototype.withMutations=function(e){var t=this.asMutable();return e(t),t.wasAltered()?t.__ensureOwner(this.__ownerID):this},We.prototype.asMutable=function(){return this.__ownerID?this:this.__ensureOwner(new S)},We.prototype.asImmutable=function(){return this.__ensureOwner()},We.prototype.wasAltered=function(){return this.__altered},We.prototype.__iterator=function(e,t){return new et(this,e,t)},We.prototype.__iterate=function(e,t){var n=this,r=0;return this._root&&this._root.iterate((function(t){return r++,e(t[1],t[0],n)}),t),r},We.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?rt(this.size,this._root,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},We.isMap=He;var $e,Je="@@__IMMUTABLE_MAP__@@",Ke=We.prototype;function Ye(e,t){this.ownerID=e,this.entries=t}function Ge(e,t,n){this.ownerID=e,this.bitmap=t,this.nodes=n}function Qe(e,t,n){this.ownerID=e,this.count=t,this.nodes=n}function Ze(e,t,n){this.ownerID=e,this.keyHash=t,this.entries=n}function Xe(e,t,n){this.ownerID=e,this.keyHash=t,this.entry=n}function 
et(e,t,n){this._type=t,this._reverse=n,this._stack=e._root&&nt(e._root)}function tt(e,t){return z(e,t[0],t[1])}function nt(e,t){return{node:e,index:0,__prev:t}}function rt(e,t,n,r){var o=Object.create(Ke);return o.size=e,o._root=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function ot(){return $e||($e=rt(0))}function at(e,t,n){var r,o;if(e._root){var a=E(w),i=E(x);if(r=it(e._root,e.__ownerID,0,void 0,t,n,a,i),!i.value)return e;o=e.size+(a.value?n===b?-1:1:0)}else{if(n===b)return e;o=1,r=new Ye(e.__ownerID,[[t,n]])}return e.__ownerID?(e.size=o,e._root=r,e.__hash=void 0,e.__altered=!0,e):r?rt(o,r):ot()}function it(e,t,n,r,o,a,i,u){return e?e.update(t,n,r,o,a,i,u):a===b?e:(_(u),_(i),new Xe(t,r,[o,a]))}function ut(e){return e.constructor===Xe||e.constructor===Ze}function st(e,t,n,r,o){if(e.keyHash===r)return new Ze(t,r,[e.entry,o]);var a,i=(0===n?e.keyHash:e.keyHash>>>n)&y,u=(0===n?r:r>>>n)&y;return new Ge(t,1<>>=1)i[u]=1&n?t[a++]:void 0;return i[r]=o,new Qe(e,a+1,i)}function pt(e,t,n){for(var o=[],a=0;a>1&1431655765))+(e>>2&858993459))+(e>>4)&252645135,e+=e>>8,127&(e+=e>>16)}function yt(e,t,n,r){var o=r?e:k(e);return o[t]=n,o}function bt(e,t,n,r){var o=e.length+1;if(r&&t+1===o)return e[t]=n,e;for(var a=new Array(o),i=0,u=0;u=xt)return ct(e,s,r,o);var p=e&&e===this.ownerID,h=p?s:k(s);return f?u?c===l-1?h.pop():h[c]=h.pop():h[c]=[r,o]:h.push([r,o]),p?(this.entries=h,this):new Ye(e,h)}},Ge.prototype.get=function(e,t,n,r){void 0===t&&(t=Ce(n));var o=1<<((0===e?t:t>>>e)&y),a=this.bitmap;return 0==(a&o)?r:this.nodes[gt(a&o-1)].get(e+v,t,n,r)},Ge.prototype.update=function(e,t,n,r,o,a,i){void 0===n&&(n=Ce(r));var u=(0===t?n:n>>>t)&y,s=1<=Et)return ft(e,p,c,u,d);if(l&&!d&&2===p.length&&ut(p[1^f]))return p[1^f];if(l&&d&&1===p.length&&ut(d))return d;var m=e&&e===this.ownerID,g=l?d?c:c^s:c|s,w=l?d?yt(p,f,d,m):wt(p,f,m):bt(p,f,d,m);return m?(this.bitmap=g,this.nodes=w,this):new Ge(e,g,w)},Qe.prototype.get=function(e,t,n,r){void 0===t&&(t=Ce(n));var 
o=(0===e?t:t>>>e)&y,a=this.nodes[o];return a?a.get(e+v,t,n,r):r},Qe.prototype.update=function(e,t,n,r,o,a,i){void 0===n&&(n=Ce(r));var u=(0===t?n:n>>>t)&y,s=o===b,c=this.nodes,l=c[u];if(s&&!l)return this;var f=it(l,e,t+v,n,r,o,a,i);if(f===l)return this;var p=this.count;if(l){if(!f&&--p<_t)return lt(e,c,p,u)}else p++;var h=e&&e===this.ownerID,d=yt(c,u,f,h);return h?(this.count=p,this.nodes=d,this):new Qe(e,p,d)},Ze.prototype.get=function(e,t,n,r){for(var o=this.entries,a=0,i=o.length;a0&&r=0&&e>>t&y;if(r>=this.array.length)return new Ct([],e);var o,a=0===r;if(t>0){var i=this.array[r];if((o=i&&i.removeBefore(e,t-v,n))===i&&a)return this}if(a&&!o)return this;var u=Lt(this,e);if(!a)for(var s=0;s>>t&y;if(o>=this.array.length)return this;if(t>0){var a=this.array[o];if((r=a&&a.removeAfter(e,t-v,n))===a&&o===this.array.length-1)return this}var i=Lt(this,e);return i.array.splice(o+1),r&&(i.array[o]=r),i};var jt,Tt,It={};function Pt(e,t){var n=e._origin,r=e._capacity,o=Ut(r),a=e._tail;return i(e._root,e._level,0);function i(e,t,n){return 0===t?u(e,n):s(e,t,n)}function u(e,i){var u=i===o?a&&a.array:e&&e.array,s=i>n?0:n-i,c=r-i;return c>g&&(c=g),function(){if(s===c)return It;var e=t?--c:s++;return u&&u[e]}}function s(e,o,a){var u,s=e&&e.array,c=a>n?0:n-a>>o,l=1+(r-a>>o);return l>g&&(l=g),function(){for(;;){if(u){var e=u();if(e!==It)return e;u=null}if(c===l)return It;var n=t?--l:c++;u=i(s&&s[n],o-v,a+(n<=e.size||t<0)return e.withMutations((function(e){t<0?Ft(e,t).set(0,n):Ft(e,0,t+1).set(t,n)}));t+=e._origin;var r=e._tail,o=e._root,a=E(x);return t>=Ut(e._capacity)?r=Dt(r,e.__ownerID,0,t,n,a):o=Dt(o,e.__ownerID,e._level,t,n,a),a.value?e.__ownerID?(e._root=o,e._tail=r,e.__hash=void 0,e.__altered=!0,e):Nt(e._origin,e._capacity,e._level,o,r):e}function Dt(e,t,n,r,o,a){var i,u=r>>>n&y,s=e&&u0){var c=e&&e.array[u],l=Dt(c,t,n-v,r,o,a);return l===c?e:((i=Lt(e,t)).array[u]=l,i)}return s&&e.array[u]===o?e:(_(a),i=Lt(e,t),void 
0===o&&u===i.array.length-1?i.array.pop():i.array[u]=o,i)}function Lt(e,t){return t&&e&&t===e.ownerID?e:new Ct(e?e.array.slice():[],t)}function Bt(e,t){if(t>=Ut(e._capacity))return e._tail;if(t<1<0;)n=n.array[t>>>r&y],r-=v;return n}}function Ft(e,t,n){void 0!==t&&(t|=0),void 0!==n&&(n|=0);var r=e.__ownerID||new S,o=e._origin,a=e._capacity,i=o+t,u=void 0===n?a:n<0?a+n:o+n;if(i===o&&u===a)return e;if(i>=u)return e.clear();for(var s=e._level,c=e._root,l=0;i+l<0;)c=new Ct(c&&c.array.length?[void 0,c]:[],r),l+=1<<(s+=v);l&&(i+=l,o+=l,u+=l,a+=l);for(var f=Ut(a),p=Ut(u);p>=1<f?new Ct([],r):h;if(h&&p>f&&iv;g-=v){var b=f>>>g&y;m=m.array[b]=Lt(m.array[b],r)}m.array[f>>>v&y]=h}if(u=p)i-=p,u-=p,s=v,c=null,d=d&&d.removeBefore(r,0,i);else if(i>o||p>>s&y;if(w!==p>>>s&y)break;w&&(l+=(1<o&&(c=c.removeBefore(r,s,i-l)),c&&pa&&(a=c.size),i(s)||(c=c.map((function(e){return he(e)}))),r.push(c)}return a>e.size&&(e=e.setSize(a)),mt(e,t,r)}function Ut(e){return e>>v<=g&&i.size>=2*a.size?(r=(o=i.filter((function(e,t){return void 0!==e&&u!==t}))).toKeyedSeq().map((function(e){return e[0]})).flip().toMap(),e.__ownerID&&(r.__ownerID=o.__ownerID=e.__ownerID)):(r=a.remove(t),o=u===i.size-1?i.pop():i.set(u,void 0))}else if(s){if(n===i.get(u)[1])return e;r=a,o=i.set(u,[t,n])}else r=a.set(t,i.size),o=i.set(i.size,[t,n]);return e.__ownerID?(e.size=r.size,e._map=r,e._list=o,e.__hash=void 0,e):Wt(r,o)}function Jt(e,t){this._iter=e,this._useKeys=t,this.size=e.size}function Kt(e){this._iter=e,this.size=e.size}function Yt(e){this._iter=e,this.size=e.size}function Gt(e){this._iter=e,this.size=e.size}function Qt(e){var t=bn(e);return t._iter=e,t.size=e.size,t.flip=function(){return e},t.reverse=function(){var t=e.reverse.apply(this);return t.flip=function(){return e.reverse()},t},t.has=function(t){return e.includes(t)},t.includes=function(t){return e.has(t)},t.cacheResult=wn,t.__iterateUncached=function(t,n){var r=this;return 
e.__iterate((function(e,n){return!1!==t(n,e,r)}),n)},t.__iteratorUncached=function(t,n){if(t===R){var r=e.__iterator(t,n);return new F((function(){var e=r.next();if(!e.done){var t=e.value[0];e.value[0]=e.value[1],e.value[1]=t}return e}))}return e.__iterator(t===M?N:M,n)},t}function Zt(e,t,n){var r=bn(e);return r.size=e.size,r.has=function(t){return e.has(t)},r.get=function(r,o){var a=e.get(r,b);return a===b?o:t.call(n,a,r,e)},r.__iterateUncached=function(r,o){var a=this;return e.__iterate((function(e,o,i){return!1!==r(t.call(n,e,o,i),o,a)}),o)},r.__iteratorUncached=function(r,o){var a=e.__iterator(R,o);return new F((function(){var o=a.next();if(o.done)return o;var i=o.value,u=i[0];return z(r,u,t.call(n,i[1],u,e),o)}))},r}function Xt(e,t){var n=bn(e);return n._iter=e,n.size=e.size,n.reverse=function(){return e},e.flip&&(n.flip=function(){var t=Qt(e);return t.reverse=function(){return e.flip()},t}),n.get=function(n,r){return e.get(t?n:-1-n,r)},n.has=function(n){return e.has(t?n:-1-n)},n.includes=function(t){return e.includes(t)},n.cacheResult=wn,n.__iterate=function(t,n){var r=this;return e.__iterate((function(e,n){return t(e,n,r)}),!n)},n.__iterator=function(t,n){return e.__iterator(t,!n)},n}function en(e,t,n,r){var o=bn(e);return r&&(o.has=function(r){var o=e.get(r,b);return o!==b&&!!t.call(n,o,r,e)},o.get=function(r,o){var a=e.get(r,b);return a!==b&&t.call(n,a,r,e)?a:o}),o.__iterateUncached=function(o,a){var i=this,u=0;return e.__iterate((function(e,a,s){if(t.call(n,e,a,s))return u++,o(e,r?a:u-1,i)}),a),u},o.__iteratorUncached=function(o,a){var i=e.__iterator(R,a),u=0;return new F((function(){for(;;){var a=i.next();if(a.done)return a;var s=a.value,c=s[0],l=s[1];if(t.call(n,l,c,e))return z(o,r?c:u++,l,a)}}))},o}function tn(e,t,n){var r=We().asMutable();return e.__iterate((function(o,a){r.update(t.call(n,o,a,e),0,(function(e){return e+1}))})),r.asImmutable()}function nn(e,t,n){var 
r=u(e),o=(l(e)?qt():We()).asMutable();e.__iterate((function(a,i){o.update(t.call(n,a,i,e),(function(e){return(e=e||[]).push(r?[i,a]:a),e}))}));var a=yn(e);return o.map((function(t){return mn(e,a(t))}))}function rn(e,t,n,r){var o=e.size;if(void 0!==t&&(t|=0),void 0!==n&&(n===1/0?n=o:n|=0),j(t,n,o))return e;var a=T(t,o),i=I(n,o);if(a!=a||i!=i)return rn(e.toSeq().cacheResult(),t,n,r);var u,s=i-a;s==s&&(u=s<0?0:s);var c=bn(e);return c.size=0===u?u:e.size&&u||void 0,!r&&ae(e)&&u>=0&&(c.get=function(t,n){return(t=O(this,t))>=0&&tu)return U();var e=o.next();return r||t===M?e:z(t,s-1,t===N?void 0:e.value[1],e)}))},c}function on(e,t,n){var r=bn(e);return r.__iterateUncached=function(r,o){var a=this;if(o)return this.cacheResult().__iterate(r,o);var i=0;return e.__iterate((function(e,o,u){return t.call(n,e,o,u)&&++i&&r(e,o,a)})),i},r.__iteratorUncached=function(r,o){var a=this;if(o)return this.cacheResult().__iterator(r,o);var i=e.__iterator(R,o),u=!0;return new F((function(){if(!u)return U();var e=i.next();if(e.done)return e;var o=e.value,s=o[0],c=o[1];return t.call(n,c,s,a)?r===R?e:z(r,s,c,e):(u=!1,U())}))},r}function an(e,t,n,r){var o=bn(e);return o.__iterateUncached=function(o,a){var i=this;if(a)return this.cacheResult().__iterate(o,a);var u=!0,s=0;return e.__iterate((function(e,a,c){if(!u||!(u=t.call(n,e,a,c)))return s++,o(e,r?a:s-1,i)})),s},o.__iteratorUncached=function(o,a){var i=this;if(a)return this.cacheResult().__iterator(o,a);var u=e.__iterator(R,a),s=!0,c=0;return new F((function(){var e,a,l;do{if((e=u.next()).done)return r||o===M?e:z(o,c++,o===N?void 0:e.value[1],e);var f=e.value;a=f[0],l=f[1],s&&(s=t.call(n,l,a,i))}while(s);return o===R?e:z(o,a,l,e)}))},o}function un(e,t){var n=u(e),o=[e].concat(t).map((function(e){return i(e)?n&&(e=r(e)):e=n?ue(e):se(Array.isArray(e)?e:[e]),e})).filter((function(e){return 0!==e.size}));if(0===o.length)return e;if(1===o.length){var a=o[0];if(a===e||n&&u(a)||s(e)&&s(a))return a}var c=new te(o);return 
n?c=c.toKeyedSeq():s(e)||(c=c.toSetSeq()),(c=c.flatten(!0)).size=o.reduce((function(e,t){if(void 0!==e){var n=t.size;if(void 0!==n)return e+n}}),0),c}function sn(e,t,n){var r=bn(e);return r.__iterateUncached=function(r,o){var a=0,u=!1;function s(e,c){var l=this;e.__iterate((function(e,o){return(!t||c0}function dn(e,t,r){var o=bn(e);return o.size=new te(r).map((function(e){return e.size})).min(),o.__iterate=function(e,t){for(var n,r=this.__iterator(M,t),o=0;!(n=r.next()).done&&!1!==e(n.value,o++,this););return o},o.__iteratorUncached=function(e,o){var a=r.map((function(e){return e=n(e),W(o?e.reverse():e)})),i=0,u=!1;return new F((function(){var n;return u||(n=a.map((function(e){return e.next()})),u=n.some((function(e){return e.done}))),u?U():z(e,i++,t.apply(null,n.map((function(e){return e.value}))))}))},o}function mn(e,t){return ae(e)?t:e.constructor(t)}function vn(e){if(e!==Object(e))throw new TypeError("Expected [K, V] tuple: "+e)}function gn(e){return Ve(e.size),A(e)}function yn(e){return u(e)?r:s(e)?o:a}function bn(e){return Object.create((u(e)?K:s(e)?Y:G).prototype)}function wn(){return this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):J.prototype.cacheResult.call(this)}function xn(e,t){return e>t?1:e=0;n--)t={value:arguments[n],next:t};return this.__ownerID?(this.size=e,this._head=t,this.__hash=void 0,this.__altered=!0,this):Kn(e,t)},Vn.prototype.pushAll=function(e){if(0===(e=o(e)).size)return this;Ve(e.size);var t=this.size,n=this._head;return e.reverse().forEach((function(e){t++,n={value:e,next:n}})),this.__ownerID?(this.size=t,this._head=n,this.__hash=void 0,this.__altered=!0,this):Kn(t,n)},Vn.prototype.pop=function(){return this.slice(1)},Vn.prototype.unshift=function(){return this.push.apply(this,arguments)},Vn.prototype.unshiftAll=function(e){return this.pushAll(e)},Vn.prototype.shift=function(){return this.pop.apply(this,arguments)},Vn.prototype.clear=function(){return 
0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):Yn()},Vn.prototype.slice=function(e,t){if(j(e,t,this.size))return this;var n=T(e,this.size);if(I(t,this.size)!==this.size)return Se.prototype.slice.call(this,e,t);for(var r=this.size-n,o=this._head;n--;)o=o.next;return this.__ownerID?(this.size=r,this._head=o,this.__hash=void 0,this.__altered=!0,this):Kn(r,o)},Vn.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?Kn(this.size,this._head,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},Vn.prototype.__iterate=function(e,t){if(t)return this.reverse().__iterate(e);for(var n=0,r=this._head;r&&!1!==e(r.value,n++,this);)r=r.next;return n},Vn.prototype.__iterator=function(e,t){if(t)return this.reverse().__iterator(e);var n=0,r=this._head;return new F((function(){if(r){var t=r.value;return r=r.next,z(e,n++,t)}return U()}))},Vn.isStack=Wn;var Hn,$n="@@__IMMUTABLE_STACK__@@",Jn=Vn.prototype;function Kn(e,t,n,r){var o=Object.create(Jn);return o.size=e,o._head=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Yn(){return Hn||(Hn=Kn(0))}function Gn(e,t){var n=function(n){e.prototype[n]=t[n]};return Object.keys(t).forEach(n),Object.getOwnPropertySymbols&&Object.getOwnPropertySymbols(t).forEach(n),e}Jn[$n]=!0,Jn.withMutations=Ke.withMutations,Jn.asMutable=Ke.asMutable,Jn.asImmutable=Ke.asImmutable,Jn.wasAltered=Ke.wasAltered,n.Iterator=F,Gn(n,{toArray:function(){Ve(this.size);var e=new Array(this.size||0);return this.valueSeq().__iterate((function(t,n){e[n]=t})),e},toIndexedSeq:function(){return new Kt(this)},toJS:function(){return this.toSeq().map((function(e){return e&&"function"==typeof e.toJS?e.toJS():e})).__toJS()},toJSON:function(){return this.toSeq().map((function(e){return e&&"function"==typeof e.toJSON?e.toJSON():e})).__toJS()},toKeyedSeq:function(){return new Jt(this,!0)},toMap:function(){return We(this.toKeyedSeq())},toObject:function(){Ve(this.size);var e={};return 
this.__iterate((function(t,n){e[n]=t})),e},toOrderedMap:function(){return qt(this.toKeyedSeq())},toOrderedSet:function(){return Ln(u(this)?this.valueSeq():this)},toSet:function(){return jn(u(this)?this.valueSeq():this)},toSetSeq:function(){return new Yt(this)},toSeq:function(){return s(this)?this.toIndexedSeq():u(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return Vn(u(this)?this.valueSeq():this)},toList:function(){return St(u(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(e,t){return 0===this.size?e+t:e+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+t},concat:function(){return mn(this,un(this,e.call(arguments,0)))},includes:function(e){return this.some((function(t){return ge(t,e)}))},entries:function(){return this.__iterator(R)},every:function(e,t){Ve(this.size);var n=!0;return this.__iterate((function(r,o,a){if(!e.call(t,r,o,a))return n=!1,!1})),n},filter:function(e,t){return mn(this,en(this,e,t,!0))},find:function(e,t,n){var r=this.findEntry(e,t);return r?r[1]:n},forEach:function(e,t){return Ve(this.size),this.__iterate(t?e.bind(t):e)},join:function(e){Ve(this.size),e=void 0!==e?""+e:",";var t="",n=!0;return this.__iterate((function(r){n?n=!1:t+=e,t+=null!=r?r.toString():""})),t},keys:function(){return this.__iterator(N)},map:function(e,t){return mn(this,Zt(this,e,t))},reduce:function(e,t,n){var r,o;return Ve(this.size),arguments.length<2?o=!0:r=t,this.__iterate((function(t,a,i){o?(o=!1,r=t):r=e.call(n,r,t,a,i)})),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return mn(this,Xt(this,!0))},slice:function(e,t){return mn(this,rn(this,e,t,!0))},some:function(e,t){return!this.every(tr(e),t)},sort:function(e){return mn(this,fn(this,e))},values:function(){return this.__iterator(M)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 
0!==this.size?0===this.size:!this.some((function(){return!0}))},count:function(e,t){return A(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return tn(this,e,t)},equals:function(e){return ye(this,e)},entrySeq:function(){var e=this;if(e._cache)return new te(e._cache);var t=e.toSeq().map(er).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(tr(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate((function(n,o,a){if(e.call(t,n,o,a))return r=[o,n],!1})),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(C)},flatMap:function(e,t){return mn(this,cn(this,e,t))},flatten:function(e){return mn(this,sn(this,e,!0))},fromEntrySeq:function(){return new Gt(this)},get:function(e,t){return this.find((function(t,n){return ge(n,e)}),void 0,t)},getIn:function(e,t){for(var n,r=this,o=En(e);!(n=o.next()).done;){var a=n.value;if((r=r&&r.get?r.get(a,b):b)===b)return t}return r},groupBy:function(e,t){return nn(this,e,t)},has:function(e){return this.get(e,b)!==b},hasIn:function(e){return this.getIn(e,b)!==b},isSubset:function(e){return e="function"==typeof e.includes?e:n(e),this.every((function(t){return e.includes(t)}))},isSuperset:function(e){return(e="function"==typeof e.isSubset?e:n(e)).isSubset(this)},keyOf:function(e){return this.findKey((function(t){return ge(t,e)}))},keySeq:function(){return this.toSeq().map(Xn).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return pn(this,e)},maxBy:function(e,t){return pn(this,t,e)},min:function(e){return pn(this,e?nr(e):ar)},minBy:function(e,t){return 
pn(this,t?nr(t):ar,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return mn(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return mn(this,an(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(tr(e),t)},sortBy:function(e,t){return mn(this,fn(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return mn(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return mn(this,on(this,e,t))},takeUntil:function(e,t){return this.takeWhile(tr(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=ir(this))}});var Qn=n.prototype;Qn[f]=!0,Qn[B]=Qn.values,Qn.__toJS=Qn.toArray,Qn.__toStringMapper=rr,Qn.inspect=Qn.toSource=function(){return this.toString()},Qn.chain=Qn.flatMap,Qn.contains=Qn.includes,Gn(r,{flip:function(){return mn(this,Qt(this))},mapEntries:function(e,t){var n=this,r=0;return mn(this,this.toSeq().map((function(o,a){return e.call(t,[a,o],r++,n)})).fromEntrySeq())},mapKeys:function(e,t){var n=this;return mn(this,this.toSeq().flip().map((function(r,o){return e.call(t,r,o,n)})).flip())}});var Zn=r.prototype;function Xn(e,t){return t}function er(e,t){return[t,e]}function tr(e){return function(){return!e.apply(this,arguments)}}function nr(e){return function(){return-e.apply(this,arguments)}}function rr(e){return"string"==typeof e?JSON.stringify(e):String(e)}function or(){return k(arguments)}function ar(e,t){return et?-1:0}function ir(e){if(e.size===1/0)return 0;var t=l(e),n=u(e),r=t?1:0;return ur(e.__iterate(n?t?function(e,t){r=31*r+sr(Ce(e),Ce(t))|0}:function(e,t){r=r+sr(Ce(e),Ce(t))|0}:t?function(e){r=31*r+Ce(e)|0}:function(e){r=r+Ce(e)|0}),r)}function ur(e,t){return t=Ae(t,3432918353),t=Ae(t<<15|t>>>-15,461845907),t=Ae(t<<13|t>>>-13,5),t=Ae((t=(t+3864292196|0)^e)^t>>>16,2246822507),t=Oe((t=Ae(t^t>>>13,3266489909))^t>>>16)}function sr(e,t){return 
e^t+2654435769+(e<<6)+(e>>2)|0}return Zn[p]=!0,Zn[B]=Qn.entries,Zn.__toJS=Qn.toObject,Zn.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+rr(e)},Gn(o,{toKeyedSeq:function(){return new Jt(this,!1)},filter:function(e,t){return mn(this,en(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return mn(this,Xt(this,!1))},slice:function(e,t){return mn(this,rn(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=T(e,e<0?this.count():this.size);var r=this.slice(0,e);return mn(this,1===n?r:r.concat(k(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return mn(this,sn(this,e,!1))},get:function(e,t){return(e=O(this,e))<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find((function(t,n){return n===e}),void 0,t)},has:function(e){return(e=O(this,e))>=0&&(void 0!==this.size?this.size===1/0||e1)try{return decodeURIComponent(t[1])}catch(e){console.error(e)}return null}function Pe(e){return t=e.replace(/\.[^./]*$/,""),Y()(J()(t));var t}function Ne(e,t,n,r,a){if(!t)return[];var u=[],s=t.get("nullable"),c=t.get("required"),f=t.get("maximum"),h=t.get("minimum"),d=t.get("type"),m=t.get("format"),g=t.get("maxLength"),b=t.get("minLength"),x=t.get("uniqueItems"),E=t.get("maxItems"),_=t.get("minItems"),S=t.get("pattern"),k=n||!0===c,A=null!=e;if(s&&null===e||!d||!(k||A&&"array"===d||!(!k&&!A)))return[];var O="string"===d&&e,C="array"===d&&l()(e)&&e.length,j="array"===d&&W.a.List.isList(e)&&e.count(),T=[O,C,j,"array"===d&&"string"==typeof e&&e,"file"===d&&e instanceof 
ue.a.File,"boolean"===d&&(e||!1===e),"number"===d&&(e||0===e),"integer"===d&&(e||0===e),"object"===d&&"object"===i()(e)&&null!==e,"object"===d&&"string"==typeof e&&e],I=P()(T).call(T,(function(e){return!!e}));if(k&&!I&&!r)return u.push("Required field is not provided"),u;if("object"===d&&(null===a||"application/json"===a)){var N,M=e;if("string"==typeof e)try{M=JSON.parse(e)}catch(e){return u.push("Parameter string value must be valid JSON"),u}if(t&&t.has("required")&&_e(c.isList)&&c.isList()&&y()(c).call(c,(function(e){void 0===M[e]&&u.push({propKey:e,error:"Required property not found"})})),t&&t.has("properties"))y()(N=t.get("properties")).call(N,(function(e,t){var n=Ne(M[t],e,!1,r,a);u.push.apply(u,o()(p()(n).call(n,(function(e){return{propKey:t,error:e}}))))}))}if(S){var R=function(e,t){if(!new RegExp(t).test(e))return"Value must follow pattern "+t}(e,S);R&&u.push(R)}if(_&&"array"===d){var D=function(e,t){var n;if(!e&&t>=1||e&&e.lengtht)return v()(n="Array must not contain more then ".concat(t," item")).call(n,1===t?"":"s")}(e,E);L&&u.push({needRemove:!0,error:L})}if(x&&"array"===d){var B=function(e,t){if(e&&("true"===t||!0===t)){var n=Object(V.fromJS)(e),r=n.toSet();if(e.length>r.size){var o=Object(V.Set)();if(y()(n).call(n,(function(e,t){w()(n).call(n,(function(t){return _e(t.equals)?t.equals(e):t===e})).size>1&&(o=o.add(t))})),0!==o.size)return p()(o).call(o,(function(e){return{index:e,error:"No duplicates allowed."}})).toArray()}}}(e,x);B&&u.push.apply(u,o()(B))}if(g||0===g){var F=function(e,t){var n;if(e.length>t)return v()(n="Value must be no longer than ".concat(t," character")).call(n,1!==t?"s":"")}(e,g);F&&u.push(F)}if(b){var z=function(e,t){var n;if(e.lengtht)return"Value must be less than ".concat(t)}(e,f);U&&u.push(U)}if(h||0===h){var q=function(e,t){if(e2&&void 0!==arguments[2]?arguments[2]:{},r=n.isOAS3,o=void 0!==r&&r,a=n.bypassRequiredCheck,i=void 
0!==a&&a,u=e.get("required"),s=Object(le.a)(e,{isOAS3:o}),c=s.schema,l=s.parameterContentMediaType;return Ne(t,c,u,i,l)},Re=function(e,t,n){if(e&&(!e.xml||!e.xml.name)){if(e.xml=e.xml||{},!e.$$ref)return e.type||e.items||e.properties||e.additionalProperties?'\n\x3c!-- XML example cannot be generated; root element name is undefined --\x3e':null;var r=e.$$ref.match(/\S*\/(\S+)$/);e.xml.name=r[1]}return Object(ie.memoizedCreateXMLExample)(e,t,n)},De=[{when:/json/,shouldStringifyTypes:["string"]}],Le=["object"],Be=function(e,t,n,r){var a=Object(ie.memoizedSampleFromSchema)(e,t,r),u=i()(a),s=S()(De).call(De,(function(e,t){var r;return t.when.test(n)?v()(r=[]).call(r,o()(e),o()(t.shouldStringifyTypes)):e}),Le);return te()(s,(function(e){return e===u}))?M()(a,null,2):a},Fe=function(e,t,n,r){var o,a=Be(e,t,n,r);try{"\n"===(o=me.a.dump(me.a.load(a),{lineWidth:-1}))[o.length-1]&&(o=T()(o).call(o,0,o.length-1))}catch(e){return console.error(e),"error: could not generate yaml example"}return o.replace(/\t/g," ")},ze=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"",n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:void 0;return e&&_e(e.toJS)&&(e=e.toJS()),r&&_e(r.toJS)&&(r=r.toJS()),/xml/.test(t)?Re(e,n,r):/(yaml|yml)/.test(t)?Fe(e,n,t,r):Be(e,n,t,r)},Ue=function(){var e={},t=ue.a.location.search;if(!t)return{};if(""!=t){var n=t.substr(1).split("&");for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(r=n[r].split("="),e[decodeURIComponent(r[0])]=r[1]&&decodeURIComponent(r[1])||"")}return e},qe=function(t){return(t instanceof e?t:e.from(t.toString(),"utf-8")).toString("base64")},Ve={operationsSorter:{alpha:function(e,t){return e.get("path").localeCompare(t.get("path"))},method:function(e,t){return e.get("method").localeCompare(t.get("method"))}},tagsSorter:{alpha:function(e,t){return e.localeCompare(t)}}},We=function(e){var t=[];for(var n in e){var r=e[n];void 
0!==r&&""!==r&&t.push([n,"=",encodeURIComponent(r).replace(/%20/g,"+")].join(""))}return t.join("&")},He=function(e,t,n){return!!X()(n,(function(n){return re()(e[n],t[n])}))};function $e(e){return"string"!=typeof e||""===e?"":Object(H.sanitizeUrl)(e)}function Je(e){return!(!e||D()(e).call(e,"localhost")>=0||D()(e).call(e,"127.0.0.1")>=0||"none"===e)}function Ke(e){if(!W.a.OrderedMap.isOrderedMap(e))return null;if(!e.size)return null;var t=B()(e).call(e,(function(e,t){return z()(t).call(t,"2")&&E()(e.get("content")||{}).length>0})),n=e.get("default")||W.a.OrderedMap(),r=(n.get("content")||W.a.OrderedMap()).keySeq().toJS().length?n:null;return t||r}var Ye=function(e){return"string"==typeof e||e instanceof String?q()(e).call(e).replace(/\s/g,"%20"):""},Ge=function(e){return ce()(Ye(e).replace(/%20/g,"_"))},Qe=function(e){return w()(e).call(e,(function(e,t){return/^x-/.test(t)}))},Ze=function(e){return w()(e).call(e,(function(e,t){return/^pattern|maxLength|minLength|maximum|minimum/.test(t)}))};function Xe(e,t){var n,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){return!0};if("object"!==i()(e)||l()(e)||null===e||!t)return e;var o=A()({},e);return y()(n=E()(o)).call(n,(function(e){e===t&&r(o[e],e)?delete o[e]:o[e]=Xe(o[e],t,r)})),o}function et(e){if("string"==typeof e)return e;if(e&&e.toJS&&(e=e.toJS()),"object"===i()(e)&&null!==e)try{return M()(e,null,2)}catch(t){return String(e)}return null==e?"":e.toString()}function tt(e){return"number"==typeof e?e.toString():e}function nt(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=t.returnAll,r=void 0!==n&&n,o=t.allowHashes,a=void 0===o||o;if(!W.a.Map.isMap(e))throw new Error("paramToIdentifier: received a non-Im.Map parameter as input");var i,u,s,c=e.get("name"),l=e.get("in"),f=[];e&&e.hashCode&&l&&c&&a&&f.push(v()(i=v()(u="".concat(l,".")).call(u,c,".hash-")).call(i,e.hashCode()));l&&c&&f.push(v()(s="".concat(l,".")).call(s,c));return f.push(c),r?f:f[0]||""}function rt(e,t){var 
n,r=nt(e,{returnAll:!0});return w()(n=p()(r).call(r,(function(e){return t[e]}))).call(n,(function(e){return void 0!==e}))[0]}function ot(){return it(pe()(32).toString("base64"))}function at(e){return it(de()("sha256").update(e).digest("base64"))}function it(e){return e.replace(/\+/g,"-").replace(/\//g,"_").replace(/=/g,"")}var ut=function(e){return!e||!(!ge(e)||!e.isEmpty())}}).call(this,n(129).Buffer)},function(e,t,n){var r=n(416),o=n(185),a=n(250);e.exports=function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=r(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),o(e,"prototype",{writable:!1}),t&&a(e,t)},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){var r=n(251),o=n(137),a=n(417),i=n(769);e.exports=function(e){var t=a();return function(){var n,a=o(e);if(t){var u=o(this).constructor;n=r(a,arguments,u)}else n=a.apply(this,arguments);return i(this,n)}},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t){e.exports=function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){e.exports=n(889)()},function(e,t,n){e.exports=n(349)},function(e,t,n){var r=n(386),o=n(627),a=n(197),i=n(387);e.exports=function(e,t){return r(e)||o(e,t)||a(e,t)||i()},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){var r=n(611),o=n(379),a=n(197),i=n(626);e.exports=function(e){return r(e)||o(e)||a(e)||i()},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){e.exports=n(350)},function(e,t,n){"use strict";var r=n(17),o=n(96),a=n(28),i=n(42),u=n(97).f,s=n(335),c=n(35),l=n(55),f=n(90),p=n(47),h=function(e){var t=function(n,r,a){if(this instanceof t){switch(arguments.length){case 0:return new e;case 1:return new e(n);case 2:return new e(n,r)}return new e(n,r,a)}return o(e,this,arguments)};return 
t.prototype=e.prototype,t};e.exports=function(e,t){var n,o,d,m,v,g,y,b,w=e.target,x=e.global,E=e.stat,_=e.proto,S=x?r:E?r[w]:(r[w]||{}).prototype,k=x?c:c[w]||f(c,w,{})[w],A=k.prototype;for(d in t)n=!s(x?d:w+(E?".":"#")+d,e.forced)&&S&&p(S,d),v=k[d],n&&(g=e.noTargetGet?(b=u(S,d))&&b.value:S[d]),m=n&&g?g:t[d],n&&typeof v==typeof m||(y=e.bind&&n?l(m,r):e.wrap&&n?h(m):_&&i(m)?a(m):m,(e.sham||m&&m.sham||v&&v.sham)&&f(y,"sham",!0),f(k,d,y),_&&(p(c,o=w+"Prototype")||f(c,o,{}),f(c[o],d,m),e.real&&A&&!A[d]&&f(A,d,m)))}},function(e,t,n){(function(t){var n=function(e){return e&&e.Math==Math&&e};e.exports=n("object"==typeof globalThis&&globalThis)||n("object"==typeof window&&window)||n("object"==typeof self&&self)||n("object"==typeof t&&t)||function(){return this}()||Function("return this")()}).call(this,n(61))},function(e,t,n){"use strict";n.d(t,"a",(function(){return s}));var r="NOT_FOUND";var o=function(e,t){return e===t};function a(e,t){var n,a,i="object"==typeof t?t:{equalityCheck:t},u=i.equalityCheck,s=void 0===u?o:u,c=i.maxSize,l=void 0===c?1:c,f=i.resultEqualityCheck,p=function(e){return function(t,n){if(null===t||null===n||t.length!==n.length)return!1;for(var r=t.length,o=0;o-1){var a=n[o];return o>0&&(n.splice(o,1),n.unshift(a)),a.value}return r}return{get:o,put:function(t,a){o(t)===r&&(n.unshift({key:t,value:a}),n.length>e&&n.pop())},getEntries:function(){return n},clear:function(){n=[]}}}(l,p);function d(){var t=h.get(arguments);if(t===r){if(t=e.apply(null,arguments),f){var n=h.getEntries(),o=n.find((function(e){return f(e.value,t)}));o&&(t=o.value)}h.put(arguments,t)}return t}return d.clearCache=function(){return h.clear()},d}function i(e){var t=Array.isArray(e[0])?e[0]:e;if(!t.every((function(e){return"function"==typeof e}))){var n=t.map((function(e){return"function"==typeof e?"function "+(e.name||"unnamed")+"()":typeof e})).join(", ");throw new Error("createSelector expects all input-selectors to be functions, but received the following types: ["+n+"]")}return 
t}function u(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r>",i=function(){invariant(!1,"ImmutablePropTypes type checking code is stripped in production.")};i.isRequired=i;var u=function(){return i};function s(e){var t=typeof e;return Array.isArray(e)?"array":e instanceof RegExp?"object":e instanceof o.Iterable?"Immutable."+e.toSource().split(" ")[0]:t}function c(e){function t(t,n,r,o,i,u){for(var s=arguments.length,c=Array(s>6?s-6:0),l=6;l4)}function l(e){var t=e.get("swagger");return"string"==typeof t&&i()(t).call(t,"2.0")}function f(e){return function(t,n){return function(r){return n&&n.specSelectors&&n.specSelectors.specJson?c(n.specSelectors.specJson())?s.a.createElement(e,o()({},r,n,{Ori:t})):s.a.createElement(t,r):(console.warn("OAS3 wrapper: couldn't get spec"),null)}}}},function(e,t,n){var r=n(17),o=n(221),a=n(47),i=n(179),u=n(219),s=n(333),c=o("wks"),l=r.Symbol,f=l&&l.for,p=s?l:l&&l.withoutSetter||i;e.exports=function(e){if(!a(c,e)||!u&&"string"!=typeof c[e]){var t="Symbol."+e;u&&a(l,e)?c[e]=l[e]:c[e]=s&&f?f(t):p(t)}return c[e]}},function(e,t,n){var r=n(35);e.exports=function(e){return r[e+"Prototype"]}},function(e,t){e.exports=function(e){return"function"==typeof e}},function(e,t,n){var r=n(245);e.exports=function(e,t,n){var o=null==e?void 0:r(e,t);return void 0===o?n:o}},function(e,t,n){e.exports=n(872)},function(e,t,n){var r=n(42);e.exports=function(e){return"object"==typeof e?null!==e:r(e)}},function(e,t,n){var r=n(17),o=n(42),a=n(178),i=r.TypeError;e.exports=function(e){if(o(e))return e;throw i(a(e)+" is not a function")}},function(e,t,n){var r=n(28),o=n(62),a=r({}.hasOwnProperty);e.exports=Object.hasOwn||function(e,t){return a(o(e),t)}},function(e,t,n){var r=n(35),o=n(47),a=n(231),i=n(63).f;e.exports=function(e){var t=r.Symbol||(r.Symbol={});o(t,e)||i(t,e,{value:a.f(e)})}},function(e,t,n){var 
r=n(17),o=n(55),a=n(37),i=n(33),u=n(178),s=n(384),c=n(67),l=n(36),f=n(159),p=n(158),h=n(383),d=r.TypeError,m=function(e,t){this.stopped=e,this.result=t},v=m.prototype;e.exports=function(e,t,n){var r,g,y,b,w,x,E,_=n&&n.that,S=!(!n||!n.AS_ENTRIES),k=!(!n||!n.IS_ITERATOR),A=!(!n||!n.INTERRUPTED),O=o(t,_),C=function(e){return r&&h(r,"normal",e),new m(!0,e)},j=function(e){return S?(i(e),A?O(e[0],e[1],C):O(e[0],e[1])):A?O(e,C):O(e)};if(k)r=e;else{if(!(g=p(e)))throw d(u(e)+" is not iterable");if(s(g)){for(y=0,b=c(e);b>y;y++)if((w=j(e[y]))&&l(v,w))return w;return new m(!1)}r=f(e,g)}for(x=r.next;!(E=a(x,r)).done;){try{w=j(E.value)}catch(e){h(r,"throw",e)}if("object"==typeof w&&w&&l(v,w))return w}return new m(!1)}},function(e,t,n){e.exports=n(353)},function(e,t,n){var r=n(34);e.exports=!r((function(){return 7!=Object.defineProperty({},1,{get:function(){return 7}})[1]}))},function(e,t,n){e.exports=n(656)},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_SPEC",(function(){return ee})),n.d(t,"UPDATE_URL",(function(){return te})),n.d(t,"UPDATE_JSON",(function(){return ne})),n.d(t,"UPDATE_PARAM",(function(){return re})),n.d(t,"UPDATE_EMPTY_PARAM_INCLUSION",(function(){return oe})),n.d(t,"VALIDATE_PARAMS",(function(){return ae})),n.d(t,"SET_RESPONSE",(function(){return ie})),n.d(t,"SET_REQUEST",(function(){return ue})),n.d(t,"SET_MUTATED_REQUEST",(function(){return se})),n.d(t,"LOG_REQUEST",(function(){return ce})),n.d(t,"CLEAR_RESPONSE",(function(){return le})),n.d(t,"CLEAR_REQUEST",(function(){return fe})),n.d(t,"CLEAR_VALIDATE_PARAMS",(function(){return pe})),n.d(t,"UPDATE_OPERATION_META_VALUE",(function(){return he})),n.d(t,"UPDATE_RESOLVED",(function(){return de})),n.d(t,"UPDATE_RESOLVED_SUBTREE",(function(){return me})),n.d(t,"SET_SCHEME",(function(){return ve})),n.d(t,"updateSpec",(function(){return ge})),n.d(t,"updateResolved",(function(){return ye})),n.d(t,"updateUrl",(function(){return be})),n.d(t,"updateJsonSpec",(function(){return 
we})),n.d(t,"parseToJson",(function(){return xe})),n.d(t,"resolveSpec",(function(){return _e})),n.d(t,"requestResolvedSubtree",(function(){return Ae})),n.d(t,"changeParam",(function(){return Oe})),n.d(t,"changeParamByIdentity",(function(){return Ce})),n.d(t,"updateResolvedSubtree",(function(){return je})),n.d(t,"invalidateResolvedSubtreeCache",(function(){return Te})),n.d(t,"validateParams",(function(){return Ie})),n.d(t,"updateEmptyParamInclusion",(function(){return Pe})),n.d(t,"clearValidateParams",(function(){return Ne})),n.d(t,"changeConsumesValue",(function(){return Me})),n.d(t,"changeProducesValue",(function(){return Re})),n.d(t,"setResponse",(function(){return De})),n.d(t,"setRequest",(function(){return Le})),n.d(t,"setMutatedRequest",(function(){return Be})),n.d(t,"logRequest",(function(){return Fe})),n.d(t,"executeRequest",(function(){return ze})),n.d(t,"execute",(function(){return Ue})),n.d(t,"clearResponse",(function(){return qe})),n.d(t,"clearRequest",(function(){return Ve})),n.d(t,"setScheme",(function(){return We}));var r=n(25),o=n.n(r),a=n(59),i=n.n(a),u=n(79),s=n.n(u),c=n(19),l=n.n(c),f=n(44),p=n.n(f),h=n(22),d=n.n(h),m=n(4),v=n.n(m),g=n(323),y=n.n(g),b=n(31),w=n.n(b),x=n(140),E=n.n(x),_=n(72),S=n.n(_),k=n(12),A=n.n(k),O=n(207),C=n.n(O),j=n(20),T=n.n(j),I=n(23),P=n.n(I),N=n(2),M=n.n(N),R=n(15),D=n.n(R),L=n(24),B=n.n(L),F=n(324),z=n.n(F),U=n(76),q=n(1),V=n(92),W=n.n(V),H=n(138),$=n(455),J=n.n($),K=n(456),Y=n.n(K),G=n(325),Q=n.n(G),Z=n(7),X=["path","method"],ee="spec_update_spec",te="spec_update_url",ne="spec_update_json",re="spec_update_param",oe="spec_update_empty_param_inclusion",ae="spec_validate_param",ie="spec_set_response",ue="spec_set_request",se="spec_set_mutated_request",ce="spec_log_request",le="spec_clear_response",fe="spec_clear_request",pe="spec_clear_validate_param",he="spec_update_operation_meta_value",de="spec_update_resolved",me="spec_update_resolved_subtree",ve="set_scheme";function ge(e){var t,n=(t=e,J()(t)?t:"").replace(/\t/g," 
");if("string"==typeof e)return{type:ee,payload:n}}function ye(e){return{type:de,payload:e}}function be(e){return{type:te,payload:e}}function we(e){return{type:ne,payload:e}}var xe=function(e){return function(t){var n=t.specActions,r=t.specSelectors,o=t.errActions,a=r.specStr,i=null;try{e=e||a(),o.clear({source:"parser"}),i=U.a.load(e)}catch(e){return console.error(e),o.newSpecErr({source:"parser",level:"error",message:e.reason,line:e.mark&&e.mark.line?e.mark.line+1:void 0})}return i&&"object"===l()(i)?n.updateJsonSpec(i):{}}},Ee=!1,_e=function(e,t){return function(n){var r=n.specActions,o=n.specSelectors,a=n.errActions,i=n.fn,u=i.fetch,s=i.resolve,c=i.AST,l=void 0===c?{}:c,f=n.getConfigs;Ee||(console.warn("specActions.resolveSpec is deprecated since v3.10.0 and will be removed in v4.0.0; use requestResolvedSubtree instead!"),Ee=!0);var p=f(),h=p.modelPropertyMacro,m=p.parameterMacro,g=p.requestInterceptor,b=p.responseInterceptor;void 0===e&&(e=o.specJson()),void 0===t&&(t=o.url());var w=l.getLineNumberForPath?l.getLineNumberForPath:function(){},x=o.specStr();return s({fetch:u,spec:e,baseDoc:t,modelPropertyMacro:h,parameterMacro:m,requestInterceptor:g,responseInterceptor:b}).then((function(e){var t=e.spec,n=e.errors;if(a.clear({type:"thrown"}),d()(n)&&n.length>0){var o=v()(n).call(n,(function(e){return console.error(e),e.line=e.fullPath?w(x,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",y()(e,"message",{enumerable:!0,value:e.message}),e}));a.newThrownErrBatch(o)}return r.updateResolved(t)}))}},Se=[],ke=Y()(s()(p.a.mark((function e(){var t,n,r,o,a,i,u,c,l,f,h,m,g,b,x,_,k,O;return p.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(t=Se.system){e.next=4;break}return console.error("debResolveSubtrees: don't have a system to operate on, aborting."),e.abrupt("return");case 4:if(n=t.errActions,r=t.errSelectors,o=t.fn,a=o.resolveSubtree,i=o.fetch,u=o.AST,c=void 
0===u?{}:u,l=t.specSelectors,f=t.specActions,a){e.next=8;break}return console.error("Error: Swagger-Client did not provide a `resolveSubtree` method, doing nothing."),e.abrupt("return");case 8:return h=c.getLineNumberForPath?c.getLineNumberForPath:function(){},m=l.specStr(),g=t.getConfigs(),b=g.modelPropertyMacro,x=g.parameterMacro,_=g.requestInterceptor,k=g.responseInterceptor,e.prev=11,e.next=14,w()(Se).call(Se,function(){var e=s()(p.a.mark((function e(t,o){var u,c,f,g,w,O,j,T,I;return p.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.next=2,t;case 2:return u=e.sent,c=u.resultMap,f=u.specWithCurrentSubtrees,e.next=7,a(f,o,{baseDoc:l.url(),modelPropertyMacro:b,parameterMacro:x,requestInterceptor:_,responseInterceptor:k});case 7:if(g=e.sent,w=g.errors,O=g.spec,r.allErrors().size&&n.clearBy((function(e){var t;return"thrown"!==e.get("type")||"resolver"!==e.get("source")||!E()(t=e.get("fullPath")).call(t,(function(e,t){return e===o[t]||void 0===o[t]}))})),d()(w)&&w.length>0&&(j=v()(w).call(w,(function(e){return e.line=e.fullPath?h(m,e.fullPath):null,e.path=e.fullPath?e.fullPath.join("."):null,e.level="error",e.type="thrown",e.source="resolver",y()(e,"message",{enumerable:!0,value:e.message}),e})),n.newThrownErrBatch(j)),!O||!l.isOAS3()||"components"!==o[0]||"securitySchemes"!==o[1]){e.next=15;break}return e.next=15,S.a.all(v()(T=A()(I=C()(O)).call(I,(function(e){return"openIdConnect"===e.type}))).call(T,function(){var e=s()(p.a.mark((function e(t){var n,r;return p.a.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return n={url:t.openIdConnectUrl,requestInterceptor:_,responseInterceptor:k},e.prev=1,e.next=4,i(n);case 4:(r=e.sent)instanceof Error||r.status>=400?console.error(r.statusText+" "+n.url):t.openIdConnectData=JSON.parse(r.text),e.next=11;break;case 8:e.prev=8,e.t0=e.catch(1),console.error(e.t0);case 11:case"end":return e.stop()}}),e,null,[[1,8]])})));return function(t){return e.apply(this,arguments)}}()));case 15:return 
Q()(c,o,O),Q()(f,o,O),e.abrupt("return",{resultMap:c,specWithCurrentSubtrees:f});case 18:case"end":return e.stop()}}),e)})));return function(t,n){return e.apply(this,arguments)}}(),S.a.resolve({resultMap:(l.specResolvedSubtree([])||Object(q.Map)()).toJS(),specWithCurrentSubtrees:l.specJson().toJS()}));case 14:O=e.sent,delete Se.system,Se=[],e.next=22;break;case 19:e.prev=19,e.t0=e.catch(11),console.error(e.t0);case 22:f.updateResolvedSubtree([],O.resultMap);case 23:case"end":return e.stop()}}),e,null,[[11,19]])}))),35),Ae=function(e){return function(t){var n;T()(n=v()(Se).call(Se,(function(e){return e.join("@@")}))).call(n,e.join("@@"))>-1||(Se.push(e),Se.system=t,ke())}};function Oe(e,t,n,r,o){return{type:re,payload:{path:e,value:r,paramName:t,paramIn:n,isXml:o}}}function Ce(e,t,n,r){return{type:re,payload:{path:e,param:t,value:n,isXml:r}}}var je=function(e,t){return{type:me,payload:{path:e,value:t}}},Te=function(){return{type:me,payload:{path:[],value:Object(q.Map)()}}},Ie=function(e,t){return{type:ae,payload:{pathMethod:e,isOAS3:t}}},Pe=function(e,t,n,r){return{type:oe,payload:{pathMethod:e,paramName:t,paramIn:n,includeEmptyValue:r}}};function Ne(e){return{type:pe,payload:{pathMethod:e}}}function Me(e,t){return{type:he,payload:{path:e,value:t,key:"consumes_value"}}}function Re(e,t){return{type:he,payload:{path:e,value:t,key:"produces_value"}}}var De=function(e,t,n){return{payload:{path:e,method:t,res:n},type:ie}},Le=function(e,t,n){return{payload:{path:e,method:t,req:n},type:ue}},Be=function(e,t,n){return{payload:{path:e,method:t,req:n},type:se}},Fe=function(e){return{payload:e,type:ce}},ze=function(e){return function(t){var n,r,o=t.fn,a=t.specActions,i=t.specSelectors,u=t.getConfigs,c=t.oas3Selectors,l=e.pathName,f=e.method,h=e.operation,m=u(),g=m.requestInterceptor,y=m.responseInterceptor,b=h.toJS();h&&h.get("parameters")&&P()(n=A()(r=h.get("parameters")).call(r,(function(e){return 
e&&!0===e.get("allowEmptyValue")}))).call(n,(function(t){if(i.parameterInclusionSettingFor([l,f],t.get("name"),t.get("in"))){e.parameters=e.parameters||{};var n=Object(Z.B)(t,e.parameters);(!n||n&&0===n.size)&&(e.parameters[t.get("name")]="")}}));if(e.contextUrl=W()(i.url()).toString(),b&&b.operationId?e.operationId=b.operationId:b&&l&&f&&(e.operationId=o.opId(b,l,f)),i.isOAS3()){var w,x=M()(w="".concat(l,":")).call(w,f);e.server=c.selectedServer(x)||c.selectedServer();var E=c.serverVariables({server:e.server,namespace:x}).toJS(),_=c.serverVariables({server:e.server}).toJS();e.serverVariables=D()(E).length?E:_,e.requestContentType=c.requestContentType(l,f),e.responseContentType=c.responseContentType(l,f)||"*/*";var S,k=c.requestBodyValue(l,f),O=c.requestBodyInclusionSetting(l,f);if(k&&k.toJS)e.requestBody=A()(S=v()(k).call(k,(function(e){return q.Map.isMap(e)?e.get("value"):e}))).call(S,(function(e,t){return(d()(e)?0!==e.length:!Object(Z.q)(e))||O.get(t)})).toJS();else e.requestBody=k}var C=B()({},e);C=o.buildRequest(C),a.setRequest(e.pathName,e.method,C);var j=function(){var t=s()(p.a.mark((function t(n){var r,o;return p.a.wrap((function(t){for(;;)switch(t.prev=t.next){case 0:return t.next=2,g.apply(undefined,[n]);case 2:return r=t.sent,o=B()({},r),a.setMutatedRequest(e.pathName,e.method,o),t.abrupt("return",r);case 6:case"end":return t.stop()}}),t)})));return function(e){return t.apply(this,arguments)}}();e.requestInterceptor=j,e.responseInterceptor=y;var T=z()();return o.execute(e).then((function(t){t.duration=z()()-T,a.setResponse(e.pathName,e.method,t)})).catch((function(t){"Failed to fetch"===t.message&&(t.name="",t.message='**Failed to fetch.** \n**Possible Reasons:** \n - CORS \n - Network Failure \n - URL scheme must be "http" or "https" for CORS request.'),a.setResponse(e.pathName,e.method,{error:!0,err:Object(H.serializeError)(t)})}))}},Ue=function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},t=e.path,n=e.method,r=i()(e,X);return 
function(e){var a=e.fn.fetch,i=e.specSelectors,u=e.specActions,s=i.specJsonWithResolvedSubtrees().toJS(),c=i.operationScheme(t,n),l=i.contentTypeValues([t,n]).toJS(),f=l.requestContentType,p=l.responseContentType,h=/xml/i.test(f),d=i.parameterValues([t,n],h).toJS();return u.executeRequest(o()(o()({},r),{},{fetch:a,spec:s,pathName:t,method:n,parameters:d,requestContentType:f,scheme:c,responseContentType:p}))}};function qe(e,t){return{type:le,payload:{path:e,method:t}}}function Ve(e,t){return{type:fe,payload:{path:e,method:t}}}function We(e,t,n){return{type:ve,payload:{scheme:e,path:t,method:n}}}},function(e,t,n){var r;!function(){"use strict";var n={}.hasOwnProperty;function o(){for(var e=[],t=0;t=e.length?{done:!0}:{done:!1,value:e[u++]}},e:function(e){throw e},f:s}}throw new TypeError("Invalid attempt to iterate non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}var c,l=!0,f=!1;return{s:function(){n=n.call(e)},n:function(){var e=n.next();return l=e.done,e},e:function(e){f=!0,c=e},f:function(){try{l||null==n.return||n.return()}finally{if(f)throw c}}}},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){var r=n(35),o=n(17),a=n(42),i=function(e){return a(e)?e:void 0};e.exports=function(e,t){return arguments.length<2?i(r[e])||i(o[e]):r[e]&&r[e][t]||o[e]&&o[e][t]}},function(e,t){var n=Array.isArray;e.exports=n},function(e,t,n){var r=n(426),o=n(253),a=n(850);e.exports=function(e,t){if(null==e)return{};var n,i,u=a(e,t);if(r){var s=r(e);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(u[n]=e[n])}return u},e.exports.__esModule=!0,e.exports.default=e.exports},function(e,t,n){"use strict";n.r(t),n.d(t,"UPDATE_SELECTED_SERVER",(function(){return r})),n.d(t,"UPDATE_REQUEST_BODY_VALUE",(function(){return o})),n.d(t,"UPDATE_REQUEST_BODY_VALUE_RETAIN_FLAG",(function(){return a})),n.d(t,"UPDATE_REQUEST_BODY_INCLUSION",(function(){return 
i})),n.d(t,"UPDATE_ACTIVE_EXAMPLES_MEMBER",(function(){return u})),n.d(t,"UPDATE_REQUEST_CONTENT_TYPE",(function(){return s})),n.d(t,"UPDATE_RESPONSE_CONTENT_TYPE",(function(){return c})),n.d(t,"UPDATE_SERVER_VARIABLE_VALUE",(function(){return l})),n.d(t,"SET_REQUEST_BODY_VALIDATE_ERROR",(function(){return f})),n.d(t,"CLEAR_REQUEST_BODY_VALIDATE_ERROR",(function(){return p})),n.d(t,"CLEAR_REQUEST_BODY_VALUE",(function(){return h})),n.d(t,"setSelectedServer",(function(){return d})),n.d(t,"setRequestBodyValue",(function(){return m})),n.d(t,"setRetainRequestBodyValueFlag",(function(){return v})),n.d(t,"setRequestBodyInclusion",(function(){return g})),n.d(t,"setActiveExamplesMember",(function(){return y})),n.d(t,"setRequestContentType",(function(){return b})),n.d(t,"setResponseContentType",(function(){return w})),n.d(t,"setServerVariableValue",(function(){return x})),n.d(t,"setRequestBodyValidateError",(function(){return E})),n.d(t,"clearRequestBodyValidateError",(function(){return _})),n.d(t,"initRequestBodyValidateError",(function(){return S})),n.d(t,"clearRequestBodyValue",(function(){return k}));var r="oas3_set_servers",o="oas3_set_request_body_value",a="oas3_set_request_body_retain_flag",i="oas3_set_request_body_inclusion",u="oas3_set_active_examples_member",s="oas3_set_request_content_type",c="oas3_set_response_content_type",l="oas3_set_server_variable_value",f="oas3_set_request_body_validate_error",p="oas3_clear_request_body_validate_error",h="oas3_clear_request_body_value";function d(e,t){return{type:r,payload:{selectedServerUrl:e,namespace:t}}}function m(e){var t=e.value,n=e.pathMethod;return{type:o,payload:{value:t,pathMethod:n}}}var v=function(e){var t=e.value,n=e.pathMethod;return{type:a,payload:{value:t,pathMethod:n}}};function g(e){var t=e.value,n=e.pathMethod,r=e.name;return{type:i,payload:{value:t,pathMethod:n,name:r}}}function y(e){var 
t=e.name,n=e.pathMethod,r=e.contextType,o=e.contextName;return{type:u,payload:{name:t,pathMethod:n,contextType:r,contextName:o}}}function b(e){var t=e.value,n=e.pathMethod;return{type:s,payload:{value:t,pathMethod:n}}}function w(e){var t=e.value,n=e.path,r=e.method;return{type:c,payload:{value:t,path:n,method:r}}}function x(e){var t=e.server,n=e.namespace,r=e.key,o=e.val;return{type:l,payload:{server:t,namespace:n,key:r,val:o}}}var E=function(e){var t=e.path,n=e.method,r=e.validationErrors;return{type:f,payload:{path:t,method:n,validationErrors:r}}},_=function(e){var t=e.path,n=e.method;return{type:p,payload:{path:t,method:n}}},S=function(e){var t=e.pathMethod;return{type:p,payload:{path:t[0],method:t[1]}}},k=function(e){var t=e.pathMethod;return{type:h,payload:{pathMethod:t}}}},function(e,t){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(e){"object"==typeof window&&(n=window)}e.exports=n},function(e,t,n){var r=n(17),o=n(114),a=r.Object;e.exports=function(e){return a(o(e))}},function(e,t,n){var r=n(17),o=n(51),a=n(334),i=n(336),u=n(33),s=n(177),c=r.TypeError,l=Object.defineProperty,f=Object.getOwnPropertyDescriptor,p="enumerable",h="configurable",d="writable";t.f=o?i?function(e,t,n){if(u(e),t=s(t),u(n),"function"==typeof e&&"prototype"===t&&"value"in n&&d in n&&!n.writable){var r=f(e,t);r&&r.writable&&(e[t]=n.value,n={configurable:h in n?n.configurable:r.configurable,enumerable:p in n?n.enumerable:r.enumerable,writable:!1})}return l(e,t,n)}:l:function(e,t,n){if(u(e),t=s(t),u(n),a)try{return l(e,t,n)}catch(e){}if("get"in n||"set"in n)throw c("Accessors not supported");return"value"in n&&(e[t]=n.value),e}},function(e,t,n){"use strict";n.d(t,"b",(function(){return m})),n.d(t,"e",(function(){return v})),n.d(t,"c",(function(){return y})),n.d(t,"a",(function(){return b})),n.d(t,"d",(function(){return w}));var r=n(56),o=n.n(r),a=n(19),i=n.n(a),u=n(109),s=n.n(u),c=n(2),l=n.n(c),f=n(32),p=n.n(f),h=function(e){return 
String.prototype.toLowerCase.call(e)},d=function(e){return e.replace(/[^\w]/gi,"_")};function m(e){var t=e.openapi;return!!t&&s()(t).call(t,"3")}function v(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"",r=arguments.length>3&&void 0!==arguments[3]?arguments[3]:{},o=r.v2OperationIdCompatibilityMode;if(!e||"object"!==i()(e))return null;var a=(e.operationId||"").replace(/\s/g,"");return a.length?d(e.operationId):g(t,n,{v2OperationIdCompatibilityMode:o})}function g(e,t){var n,r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:{},o=r.v2OperationIdCompatibilityMode;if(o){var a,i,u=l()(a="".concat(t.toLowerCase(),"_")).call(a,e).replace(/[\s!@#$%^&*()_+=[{\]};:<>|./?,\\'""-]/g,"_");return(u=u||l()(i="".concat(e.substring(1),"_")).call(i,t)).replace(/((_){2,})/g,"_").replace(/^(_)*/g,"").replace(/([_])*$/g,"")}return l()(n="".concat(h(t))).call(n,d(e))}function y(e,t){var n;return l()(n="".concat(h(t),"-")).call(n,e)}function b(e,t){return e&&e.paths?function(e,t){return function(e,t,n){if(!e||"object"!==i()(e)||!e.paths||"object"!==i()(e.paths))return null;var r=e.paths;for(var o in r)for(var a in r[o])if("PARAMETERS"!==a.toUpperCase()){var u=r[o][a];if(u&&"object"===i()(u)){var s={spec:e,pathName:o,method:a.toUpperCase(),operation:u},c=t(s);if(n&&c)return s}}return}(e,t,!0)||null}(e,(function(e){var n=e.pathName,r=e.method,o=e.operation;if(!o||"object"!==i()(o))return!1;var a=o.operationId;return[v(o,n,r),y(n,r),a].some((function(e){return e&&e===t}))})):null}function w(e){var t=e.spec,n=t.paths,r={};if(!n||t.$$normalized)return e;for(var a in n){var u,s=n[a];if(null!=s&&p()(u=["object","function"]).call(u,i()(s))){var c=s.parameters,f=function(e){var n,u=s[e];if(null==u||!p()(n=["object","function"]).call(n,i()(u)))return"continue";var f=v(u,a,e);if(f){r[f]?r[f].push(u):r[f]=[u];var h=r[f];if(h.length>1)h.forEach((function(e,t){var 
n;e.__originalOperationId=e.__originalOperationId||e.operationId,e.operationId=l()(n="".concat(f)).call(n,t+1)}));else if(void 0!==u.operationId){var d=h[0];d.__originalOperationId=d.__originalOperationId||u.operationId,d.operationId=f}}if("parameters"!==e){var m=[],g={};for(var y in t)"produces"!==y&&"consumes"!==y&&"security"!==y||(g[y]=t[y],m.push(g));if(c&&(g.parameters=c,m.push(g)),m.length){var b,w=o()(m);try{for(w.s();!(b=w.n()).done;){var x=b.value;for(var E in x)if(u[E]){if("parameters"===E){var _,S=o()(x[E]);try{var k=function(){var e=_.value;u[E].some((function(t){return t.name&&t.name===e.name||t.$ref&&t.$ref===e.$ref||t.$$ref&&t.$$ref===e.$$ref||t===e}))||u[E].push(e)};for(S.s();!(_=S.n()).done;)k()}catch(e){S.e(e)}finally{S.f()}}}else u[E]=x[E]}}catch(e){w.e(e)}finally{w.f()}}}};for(var h in s)f(h)}}return t.$$normalized=!0,e}},function(e,t,n){"use strict";n.r(t),n.d(t,"NEW_THROWN_ERR",(function(){return o})),n.d(t,"NEW_THROWN_ERR_BATCH",(function(){return a})),n.d(t,"NEW_SPEC_ERR",(function(){return i})),n.d(t,"NEW_SPEC_ERR_BATCH",(function(){return u})),n.d(t,"NEW_AUTH_ERR",(function(){return s})),n.d(t,"CLEAR",(function(){return c})),n.d(t,"CLEAR_BY",(function(){return l})),n.d(t,"newThrownErr",(function(){return f})),n.d(t,"newThrownErrBatch",(function(){return p})),n.d(t,"newSpecErr",(function(){return h})),n.d(t,"newSpecErrBatch",(function(){return d})),n.d(t,"newAuthErr",(function(){return m})),n.d(t,"clear",(function(){return v})),n.d(t,"clearBy",(function(){return g}));var r=n(138),o="err_new_thrown_err",a="err_new_thrown_err_batch",i="err_new_spec_err",u="err_new_spec_err_batch",s="err_new_auth_err",c="err_clear",l="err_clear_by";function f(e){return{type:o,payload:Object(r.serializeError)(e)}}function p(e){return{type:a,payload:e}}function h(e){return{type:i,payload:e}}function d(e){return{type:u,payload:e}}function m(e){return{type:s,payload:e}}function v(){var e=arguments.length>0&&void 
0!==arguments[0]?arguments[0]:{};return{type:c,payload:e}}function g(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:function(){return!0};return{type:l,payload:e}}},function(e,t,n){var r=n(176),o=n(114);e.exports=function(e){return r(o(e))}},function(e,t,n){var r=n(337);e.exports=function(e){return r(e.length)}},function(e,t){e.exports=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){t&&(e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}))}:e.exports=function(e,t){if(t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}}},function(e,t,n){var r=n(129),o=r.Buffer;function a(e,t){for(var n in e)t[n]=e[n]}function i(e,t,n){return o(e,t,n)}o.from&&o.alloc&&o.allocUnsafe&&o.allocUnsafeSlow?e.exports=r:(a(r,t),t.Buffer=i),a(o,i),i.from=function(e,t,n){if("number"==typeof e)throw new TypeError("Argument must not be a number");return o(e,t,n)},i.alloc=function(e,t,n){if("number"!=typeof e)throw new TypeError("Argument must be a number");var r=o(e);return void 0!==t?"string"==typeof n?r.fill(t,n):r.fill(t):r.fill(0),r},i.allocUnsafe=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return o(e)},i.allocUnsafeSlow=function(e){if("number"!=typeof e)throw new TypeError("Argument must be a number");return r.SlowBuffer(e)}},function(e,t,n){e.exports=n(381)},function(e,t,n){e.exports=n(428)},function(e,t,n){var r=n(17),o=n(80),a=r.String;e.exports=function(e){if("Symbol"===o(e))throw TypeError("Cannot convert a Symbol value to a string");return a(e)}},function(e,t,n){n(83);var r=n(506),o=n(17),a=n(80),i=n(90),u=n(127),s=n(40)("toStringTag");for(var c in r){var l=o[c],f=l&&l.prototype;f&&a(f)!==s&&i(f,s,c),u[c]=u.Array}},function(e,t,n){var r=n(361),o="object"==typeof self&&self&&self.Object===Object&&self,a=r||o||Function("return 
this")();e.exports=a},function(e,t,n){"use strict";function r(e){return null==e}var o={isNothing:r,isObject:function(e){return"object"==typeof e&&null!==e},toArray:function(e){return Array.isArray(e)?e:r(e)?[]:[e]},repeat:function(e,t){var n,r="";for(n=0;nu&&(t=r-u+(a=" ... ").length),n-r>u&&(n=r+u-(i=" ...").length),{str:a+e.slice(t,n).replace(/\t/g,"→")+i,pos:r-t+a.length}}function c(e,t){return o.repeat(" ",t-e.length)+e}var l=function(e,t){if(t=Object.create(t||null),!e.buffer)return null;t.maxLength||(t.maxLength=79),"number"!=typeof t.indent&&(t.indent=1),"number"!=typeof t.linesBefore&&(t.linesBefore=3),"number"!=typeof t.linesAfter&&(t.linesAfter=2);for(var n,r=/\r?\n|\r|\0/g,a=[0],i=[],u=-1;n=r.exec(e.buffer);)i.push(n.index),a.push(n.index+n[0].length),e.position<=n.index&&u<0&&(u=a.length-2);u<0&&(u=a.length-1);var l,f,p="",h=Math.min(e.line+t.linesAfter,i.length).toString().length,d=t.maxLength-(t.indent+h+3);for(l=1;l<=t.linesBefore&&!(u-l<0);l++)f=s(e.buffer,a[u-l],i[u-l],e.position-(a[u]-a[u-l]),d),p=o.repeat(" ",t.indent)+c((e.line-l+1).toString(),h)+" | "+f.str+"\n"+p;for(f=s(e.buffer,a[u],i[u],e.position,d),p+=o.repeat(" ",t.indent)+c((e.line+1).toString(),h)+" | "+f.str+"\n",p+=o.repeat("-",t.indent+h+3+f.pos)+"^\n",l=1;l<=t.linesAfter&&!(u+l>=i.length);l++)f=s(e.buffer,a[u+l],i[u+l],e.position-(a[u]-a[u+l]),d),p+=o.repeat(" ",t.indent)+c((e.line+l+1).toString(),h)+" | "+f.str+"\n";return p.replace(/\n$/,"")},f=["kind","multi","resolve","construct","instanceOf","predicate","represent","representName","defaultStyle","styleAliases"],p=["scalar","sequence","mapping"];var h=function(e,t){if(t=t||{},Object.keys(t).forEach((function(t){if(-1===f.indexOf(t))throw new u('Unknown option "'+t+'" is met in definition of "'+e+'" YAML type.')})),this.options=t,this.tag=e,this.kind=t.kind||null,this.resolve=t.resolve||function(){return!0},this.construct=t.construct||function(e){return 
e},this.instanceOf=t.instanceOf||null,this.predicate=t.predicate||null,this.represent=t.represent||null,this.representName=t.representName||null,this.defaultStyle=t.defaultStyle||null,this.multi=t.multi||!1,this.styleAliases=function(e){var t={};return null!==e&&Object.keys(e).forEach((function(n){e[n].forEach((function(e){t[String(e)]=n}))})),t}(t.styleAliases||null),-1===p.indexOf(this.kind))throw new u('Unknown kind "'+this.kind+'" is specified for "'+e+'" YAML type.')};function d(e,t){var n=[];return e[t].forEach((function(e){var t=n.length;n.forEach((function(n,r){n.tag===e.tag&&n.kind===e.kind&&n.multi===e.multi&&(t=r)})),n[t]=e})),n}function m(e){return this.extend(e)}m.prototype.extend=function(e){var t=[],n=[];if(e instanceof h)n.push(e);else if(Array.isArray(e))n=n.concat(e);else{if(!e||!Array.isArray(e.implicit)&&!Array.isArray(e.explicit))throw new u("Schema.extend argument should be a Type, [ Type ], or a schema definition ({ implicit: [...], explicit: [...] })");e.implicit&&(t=t.concat(e.implicit)),e.explicit&&(n=n.concat(e.explicit))}t.forEach((function(e){if(!(e instanceof h))throw new u("Specified list of YAML types (or a single Type object) contains a non-Type object.");if(e.loadKind&&"scalar"!==e.loadKind)throw new u("There is a non-scalar type in the implicit list of a schema. Implicit resolving of such types is not supported.");if(e.multi)throw new u("There is a multi type in the implicit list of a schema. 
Multi tags can only be listed as explicit.")})),n.forEach((function(e){if(!(e instanceof h))throw new u("Specified list of YAML types (or a single Type object) contains a non-Type object.")}));var r=Object.create(m.prototype);return r.implicit=(this.implicit||[]).concat(t),r.explicit=(this.explicit||[]).concat(n),r.compiledImplicit=d(r,"implicit"),r.compiledExplicit=d(r,"explicit"),r.compiledTypeMap=function(){var e,t,n={scalar:{},sequence:{},mapping:{},fallback:{},multi:{scalar:[],sequence:[],mapping:[],fallback:[]}};function r(e){e.multi?(n.multi[e.kind].push(e),n.multi.fallback.push(e)):n[e.kind][e.tag]=n.fallback[e.tag]=e}for(e=0,t=arguments.length;e=0?"0b"+e.toString(2):"-0b"+e.toString(2).slice(1)},octal:function(e){return e>=0?"0o"+e.toString(8):"-0o"+e.toString(8).slice(1)},decimal:function(e){return e.toString(10)},hexadecimal:function(e){return e>=0?"0x"+e.toString(16).toUpperCase():"-0x"+e.toString(16).toUpperCase().slice(1)}},defaultStyle:"decimal",styleAliases:{binary:[2,"bin"],octal:[8,"oct"],decimal:[10,"dec"],hexadecimal:[16,"hex"]}}),A=new RegExp("^(?:[-+]?(?:[0-9][0-9_]*)(?:\\.[0-9_]*)?(?:[eE][-+]?[0-9]+)?|\\.[0-9_]+(?:[eE][-+]?[0-9]+)?|[-+]?\\.(?:inf|Inf|INF)|\\.(?:nan|NaN|NAN))$");var O=/^[-+]?[0-9]+e/;var C=new h("tag:yaml.org,2002:float",{kind:"scalar",resolve:function(e){return null!==e&&!(!A.test(e)||"_"===e[e.length-1])},construct:function(e){var t,n;return n="-"===(t=e.replace(/_/g,"").toLowerCase())[0]?-1:1,"+-".indexOf(t[0])>=0&&(t=t.slice(1)),".inf"===t?1===n?Number.POSITIVE_INFINITY:Number.NEGATIVE_INFINITY:".nan"===t?NaN:n*parseFloat(t,10)},predicate:function(e){return"[object Number]"===Object.prototype.toString.call(e)&&(e%1!=0||o.isNegativeZero(e))},represent:function(e,t){var n;if(isNaN(e))switch(t){case"lowercase":return".nan";case"uppercase":return".NAN";case"camelcase":return".NaN"}else if(Number.POSITIVE_INFINITY===e)switch(t){case"lowercase":return".inf";case"uppercase":return".INF";case"camelcase":return".Inf"}else 
if(Number.NEGATIVE_INFINITY===e)switch(t){case"lowercase":return"-.inf";case"uppercase":return"-.INF";case"camelcase":return"-.Inf"}else if(o.isNegativeZero(e))return"-0.0";return n=e.toString(10),O.test(n)?n.replace("e",".e"):n},defaultStyle:"lowercase"}),j=w.extend({implicit:[x,E,k,C]}),T=j,I=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])$"),P=new RegExp("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:[Tt]|[ \\t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \\t]*(Z|([-+])([0-9][0-9]?)(?::([0-9][0-9]))?))?$");var N=new h("tag:yaml.org,2002:timestamp",{kind:"scalar",resolve:function(e){return null!==e&&(null!==I.exec(e)||null!==P.exec(e))},construct:function(e){var t,n,r,o,a,i,u,s,c=0,l=null;if(null===(t=I.exec(e))&&(t=P.exec(e)),null===t)throw new Error("Date resolve error");if(n=+t[1],r=+t[2]-1,o=+t[3],!t[4])return new Date(Date.UTC(n,r,o));if(a=+t[4],i=+t[5],u=+t[6],t[7]){for(c=t[7].slice(0,3);c.length<3;)c+="0";c=+c}return t[9]&&(l=6e4*(60*+t[10]+ +(t[11]||0)),"-"===t[9]&&(l=-l)),s=new Date(Date.UTC(n,r,o,a,i,u,c)),l&&s.setTime(s.getTime()-l),s},instanceOf:Date,represent:function(e){return e.toISOString()}});var M=new h("tag:yaml.org,2002:merge",{kind:"scalar",resolve:function(e){return"<<"===e||null===e}}),R="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\n\r";var D=new h("tag:yaml.org,2002:binary",{kind:"scalar",resolve:function(e){if(null===e)return!1;var t,n,r=0,o=e.length,a=R;for(n=0;n64)){if(t<0)return!1;r+=6}return r%8==0},construct:function(e){var t,n,r=e.replace(/[\r\n=]/g,""),o=r.length,a=R,i=0,u=[];for(t=0;t>16&255),u.push(i>>8&255),u.push(255&i)),i=i<<6|a.indexOf(r.charAt(t));return 0===(n=o%4*6)?(u.push(i>>16&255),u.push(i>>8&255),u.push(255&i)):18===n?(u.push(i>>10&255),u.push(i>>2&255)):12===n&&u.push(i>>4&255),new Uint8Array(u)},predicate:function(e){return"[object Uint8Array]"===Object.prototype.toString.call(e)},represent:function(e){var 
t,n,r="",o=0,a=e.length,i=R;for(t=0;t>18&63],r+=i[o>>12&63],r+=i[o>>6&63],r+=i[63&o]),o=(o<<8)+e[t];return 0===(n=a%3)?(r+=i[o>>18&63],r+=i[o>>12&63],r+=i[o>>6&63],r+=i[63&o]):2===n?(r+=i[o>>10&63],r+=i[o>>4&63],r+=i[o<<2&63],r+=i[64]):1===n&&(r+=i[o>>2&63],r+=i[o<<4&63],r+=i[64],r+=i[64]),r}}),L=Object.prototype.hasOwnProperty,B=Object.prototype.toString;var F=new h("tag:yaml.org,2002:omap",{kind:"sequence",resolve:function(e){if(null===e)return!0;var t,n,r,o,a,i=[],u=e;for(t=0,n=u.length;t>10),56320+(e-65536&1023))}for(var ae=new Array(256),ie=new Array(256),ue=0;ue<256;ue++)ae[ue]=re(ue)?1:0,ie[ue]=re(ue);function se(e,t){this.input=e,this.filename=t.filename||null,this.schema=t.schema||W,this.onWarning=t.onWarning||null,this.legacy=t.legacy||!1,this.json=t.json||!1,this.listener=t.listener||null,this.implicitTypes=this.schema.compiledImplicit,this.typeMap=this.schema.compiledTypeMap,this.length=e.length,this.position=0,this.line=0,this.lineStart=0,this.lineIndent=0,this.firstTabInLine=-1,this.documents=[]}function ce(e,t){var n={name:e.filename,buffer:e.input.slice(0,-1),position:e.position,line:e.line,column:e.position-e.lineStart};return n.snippet=l(n),new u(t,n)}function le(e,t){throw ce(e,t)}function fe(e,t){e.onWarning&&e.onWarning.call(null,ce(e,t))}var pe={YAML:function(e,t,n){var r,o,a;null!==e.version&&le(e,"duplication of %YAML directive"),1!==n.length&&le(e,"YAML directive accepts exactly one argument"),null===(r=/^([0-9]+)\.([0-9]+)$/.exec(n[0]))&&le(e,"ill-formed argument of the YAML directive"),o=parseInt(r[1],10),a=parseInt(r[2],10),1!==o&&le(e,"unacceptable YAML version of the document"),e.version=n[0],e.checkLineBreaks=a<2,1!==a&&2!==a&&fe(e,"unsupported YAML version of the document")},TAG:function(e,t,n){var r,o;2!==n.length&&le(e,"TAG directive accepts exactly two arguments"),r=n[0],o=n[1],Y.test(r)||le(e,"ill-formed tag handle (first argument) of the TAG directive"),H.call(e.tagMap,r)&&le(e,'there is a previously declared suffix for "'+r+'" 
tag handle'),G.test(o)||le(e,"ill-formed tag prefix (second argument) of the TAG directive");try{o=decodeURIComponent(o)}catch(t){le(e,"tag prefix is malformed: "+o)}e.tagMap[r]=o}};function he(e,t,n,r){var o,a,i,u;if(t1&&(e.result+=o.repeat("\n",t-1))}function we(e,t){var n,r,o=e.tag,a=e.anchor,i=[],u=!1;if(-1!==e.firstTabInLine)return!1;for(null!==e.anchor&&(e.anchorMap[e.anchor]=i),r=e.input.charCodeAt(e.position);0!==r&&(-1!==e.firstTabInLine&&(e.position=e.firstTabInLine,le(e,"tab characters must not be used in indentation")),45===r)&&ee(e.input.charCodeAt(e.position+1));)if(u=!0,e.position++,ge(e,!0,-1)&&e.lineIndent<=t)i.push(null),r=e.input.charCodeAt(e.position);else if(n=e.line,_e(e,t,3,!1,!0),i.push(e.result),ge(e,!0,-1),r=e.input.charCodeAt(e.position),(e.line===n||e.lineIndent>t)&&0!==r)le(e,"bad indentation of a sequence entry");else if(e.lineIndentt?m=1:e.lineIndent===t?m=0:e.lineIndentt?m=1:e.lineIndent===t?m=0:e.lineIndentt)&&(g&&(i=e.line,u=e.lineStart,s=e.position),_e(e,t,4,!0,o)&&(g?m=e.result:v=e.result),g||(me(e,p,h,d,m,v,i,u,s),d=m=v=null),ge(e,!0,-1),c=e.input.charCodeAt(e.position)),(e.line===a||e.lineIndent>t)&&0!==c)le(e,"bad indentation of a mapping entry");else if(e.lineIndent=0))break;0===a?le(e,"bad explicit indentation width of a block scalar; it cannot be less than one"):l?le(e,"repeat of an indentation width identifier"):(f=t+a-1,l=!0)}if(X(i)){do{i=e.input.charCodeAt(++e.position)}while(X(i));if(35===i)do{i=e.input.charCodeAt(++e.position)}while(!Z(i)&&0!==i)}for(;0!==i;){for(ve(e),e.lineIndent=0,i=e.input.charCodeAt(e.position);(!l||e.lineIndentf&&(f=e.lineIndent),Z(i))p++;else{if(e.lineIndent0){for(o=i,a=0;o>0;o--)(i=ne(u=e.input.charCodeAt(++e.position)))>=0?a=(a<<4)+i:le(e,"expected hexadecimal character");e.result+=oe(a),e.position++}else le(e,"unknown escape sequence");n=r=e.position}else Z(u)?(he(e,n,r,!0),be(e,ge(e,!1,t)),n=r=e.position):e.position===e.lineStart&&ye(e)?le(e,"unexpected end of the document within a double 
quoted scalar"):(e.position++,r=e.position)}le(e,"unexpected end of the stream within a double quoted scalar")}(e,h)?g=!0:!function(e){var t,n,r;if(42!==(r=e.input.charCodeAt(e.position)))return!1;for(r=e.input.charCodeAt(++e.position),t=e.position;0!==r&&!ee(r)&&!te(r);)r=e.input.charCodeAt(++e.position);return e.position===t&&le(e,"name of an alias node must contain at least one character"),n=e.input.slice(t,e.position),H.call(e.anchorMap,n)||le(e,'unidentified alias "'+n+'"'),e.result=e.anchorMap[n],ge(e,!0,-1),!0}(e)?function(e,t,n){var r,o,a,i,u,s,c,l,f=e.kind,p=e.result;if(ee(l=e.input.charCodeAt(e.position))||te(l)||35===l||38===l||42===l||33===l||124===l||62===l||39===l||34===l||37===l||64===l||96===l)return!1;if((63===l||45===l)&&(ee(r=e.input.charCodeAt(e.position+1))||n&&te(r)))return!1;for(e.kind="scalar",e.result="",o=a=e.position,i=!1;0!==l;){if(58===l){if(ee(r=e.input.charCodeAt(e.position+1))||n&&te(r))break}else if(35===l){if(ee(e.input.charCodeAt(e.position-1)))break}else{if(e.position===e.lineStart&&ye(e)||n&&te(l))break;if(Z(l)){if(u=e.line,s=e.lineStart,c=e.lineIndent,ge(e,!1,-1),e.lineIndent>=t){i=!0,l=e.input.charCodeAt(e.position);continue}e.position=a,e.line=u,e.lineStart=s,e.lineIndent=c;break}}i&&(he(e,o,a,!1),be(e,e.line-u),o=a=e.position,i=!1),X(l)||(a=e.position+1),l=e.input.charCodeAt(++e.position)}return he(e,o,a,!1),!!e.result||(e.kind=f,e.result=p,!1)}(e,h,1===n)&&(g=!0,null===e.tag&&(e.tag="?")):(g=!0,null===e.tag&&null===e.anchor||le(e,"alias node should not have any properties")),null!==e.anchor&&(e.anchorMap[e.anchor]=e.result)):0===m&&(g=s&&we(e,d))),null===e.tag)null!==e.anchor&&(e.anchorMap[e.anchor]=e.result);else if("?"===e.tag){for(null!==e.result&&"scalar"!==e.kind&&le(e,'unacceptable node kind for ! 
tag; it should be "scalar", not "'+e.kind+'"'),c=0,l=e.implicitTypes.length;c"),null!==e.result&&p.kind!==e.kind&&le(e,"unacceptable node kind for !<"+e.tag+'> tag; it should be "'+p.kind+'", not "'+e.kind+'"'),p.resolve(e.result,e.tag)?(e.result=p.construct(e.result,e.tag),null!==e.anchor&&(e.anchorMap[e.anchor]=e.result)):le(e,"cannot resolve a node with !<"+e.tag+"> explicit tag")}return null!==e.listener&&e.listener("close",e),null!==e.tag||null!==e.anchor||g}function Se(e){var t,n,r,o,a=e.position,i=!1;for(e.version=null,e.checkLineBreaks=e.legacy,e.tagMap=Object.create(null),e.anchorMap=Object.create(null);0!==(o=e.input.charCodeAt(e.position))&&(ge(e,!0,-1),o=e.input.charCodeAt(e.position),!(e.lineIndent>0||37!==o));){for(i=!0,o=e.input.charCodeAt(++e.position),t=e.position;0!==o&&!ee(o);)o=e.input.charCodeAt(++e.position);for(r=[],(n=e.input.slice(t,e.position)).length<1&&le(e,"directive name must not be less than one character in length");0!==o;){for(;X(o);)o=e.input.charCodeAt(++e.position);if(35===o){do{o=e.input.charCodeAt(++e.position)}while(0!==o&&!Z(o));break}if(Z(o))break;for(t=e.position;0!==o&&!ee(o);)o=e.input.charCodeAt(++e.position);r.push(e.input.slice(t,e.position))}0!==o&&ve(e),H.call(pe,n)?pe[n](e,n,r):fe(e,'unknown document directive "'+n+'"')}ge(e,!0,-1),0===e.lineIndent&&45===e.input.charCodeAt(e.position)&&45===e.input.charCodeAt(e.position+1)&&45===e.input.charCodeAt(e.position+2)?(e.position+=3,ge(e,!0,-1)):i&&le(e,"directives end mark is expected"),_e(e,e.lineIndent-1,4,!1,!0),ge(e,!0,-1),e.checkLineBreaks&&J.test(e.input.slice(a,e.position))&&fe(e,"non-ASCII line breaks are interpreted as content"),e.documents.push(e.result),e.position===e.lineStart&&ye(e)?46===e.input.charCodeAt(e.position)&&(e.position+=3,ge(e,!0,-1)):e.position=55296&&r<=56319&&t+1=56320&&n<=57343?1024*(r-55296)+n-56320+65536:r}function qe(e){return/^\n* /.test(e)}function Ve(e,t,n,r,o,a,i,u){var 
s,c,l=0,f=null,p=!1,h=!1,d=-1!==r,m=-1,v=Be(c=Ue(e,0))&&c!==je&&!Le(c)&&45!==c&&63!==c&&58!==c&&44!==c&&91!==c&&93!==c&&123!==c&&125!==c&&35!==c&&38!==c&&42!==c&&33!==c&&124!==c&&61!==c&&62!==c&&39!==c&&34!==c&&37!==c&&64!==c&&96!==c&&function(e){return!Le(e)&&58!==e}(Ue(e,e.length-1));if(t||i)for(s=0;s=65536?s+=2:s++){if(!Be(l=Ue(e,s)))return 5;v=v&&ze(l,f,u),f=l}else{for(s=0;s=65536?s+=2:s++){if(10===(l=Ue(e,s)))p=!0,d&&(h=h||s-m-1>r&&" "!==e[m+1],m=s);else if(!Be(l))return 5;v=v&&ze(l,f,u),f=l}h=h||d&&s-m-1>r&&" "!==e[m+1]}return p||h?n>9&&qe(e)?5:i?2===a?5:2:h?4:3:!v||i||o(e)?2===a?5:2:1}function We(e,t,n,r,o){e.dump=function(){if(0===t.length)return 2===e.quotingType?'""':"''";if(!e.noCompatMode&&(-1!==Ie.indexOf(t)||Pe.test(t)))return 2===e.quotingType?'"'+t+'"':"'"+t+"'";var a=e.indent*Math.max(1,n),i=-1===e.lineWidth?-1:Math.max(Math.min(e.lineWidth,40),e.lineWidth-a),s=r||e.flowLevel>-1&&n>=e.flowLevel;switch(Ve(t,s,e.indent,i,(function(t){return function(e,t){var n,r;for(n=0,r=e.implicitTypes.length;n"+He(t,e.indent)+$e(Re(function(e,t){var n,r,o=/(\n+)([^\n]*)/g,a=(u=e.indexOf("\n"),u=-1!==u?u:e.length,o.lastIndex=u,Je(e.slice(0,u),t)),i="\n"===e[0]||" "===e[0];var u;for(;r=o.exec(e);){var s=r[1],c=r[2];n=" "===c[0],a+=s+(i||n||""===c?"":"\n")+Je(c,t),i=n}return a}(t,i),a));case 5:return'"'+function(e){for(var t,n="",r=0,o=0;o=65536?o+=2:o++)r=Ue(e,o),!(t=Te[r])&&Be(r)?(n+=e[o],r>=65536&&(n+=e[o+1])):n+=t||Ne(r);return n}(t)+'"';default:throw new u("impossible error: invalid scalar style")}}()}function He(e,t){var n=qe(e)?String(t):"",r="\n"===e[e.length-1];return n+(r&&("\n"===e[e.length-2]||"\n"===e)?"+":r?"":"-")+"\n"}function $e(e){return"\n"===e[e.length-1]?e.slice(0,-1):e}function Je(e,t){if(""===e||" "===e[0])return e;for(var n,r,o=/ [^ ]/g,a=0,i=0,u=0,s="";n=o.exec(e);)(u=n.index)-a>t&&(r=i>a?i:u,s+="\n"+e.slice(a,r),a=r+1),i=u;return s+="\n",e.length-a>t&&i>a?s+=e.slice(a,i)+"\n"+e.slice(i+1):s+=e.slice(a),s.slice(1)}function Ke(e,t,n,r){var 
o,a,i,u="",s=e.tag;for(o=0,a=n.length;o tag resolver accepts not "'+c+'" style');r=s.represent[c](t,c)}e.dump=r}return!0}return!1}function Ge(e,t,n,r,o,a,i){e.tag=null,e.dump=n,Ye(e,n,!1)||Ye(e,n,!0);var s,c=Oe.call(e.dump),l=r;r&&(r=e.flowLevel<0||e.flowLevel>t);var f,p,h="[object Object]"===c||"[object Array]"===c;if(h&&(p=-1!==(f=e.duplicates.indexOf(n))),(null!==e.tag&&"?"!==e.tag||p||2!==e.indent&&t>0)&&(o=!1),p&&e.usedDuplicates[f])e.dump="*ref_"+f;else{if(h&&p&&!e.usedDuplicates[f]&&(e.usedDuplicates[f]=!0),"[object Object]"===c)r&&0!==Object.keys(e.dump).length?(!function(e,t,n,r){var o,a,i,s,c,l,f="",p=e.tag,h=Object.keys(n);if(!0===e.sortKeys)h.sort();else if("function"==typeof e.sortKeys)h.sort(e.sortKeys);else if(e.sortKeys)throw new u("sortKeys must be a boolean or a function");for(o=0,a=h.length;o1024)&&(e.dump&&10===e.dump.charCodeAt(0)?l+="?":l+="? "),l+=e.dump,c&&(l+=De(e,t)),Ge(e,t+1,s,!0,c)&&(e.dump&&10===e.dump.charCodeAt(0)?l+=":":l+=": ",f+=l+=e.dump));e.tag=p,e.dump=f||"{}"}(e,t,e.dump,o),p&&(e.dump="&ref_"+f+e.dump)):(!function(e,t,n){var r,o,a,i,u,s="",c=e.tag,l=Object.keys(n);for(r=0,o=l.length;r1024&&(u+="? 
"),u+=e.dump+(e.condenseFlow?'"':"")+":"+(e.condenseFlow?"":" "),Ge(e,t,i,!1,!1)&&(s+=u+=e.dump));e.tag=c,e.dump="{"+s+"}"}(e,t,e.dump),p&&(e.dump="&ref_"+f+" "+e.dump));else if("[object Array]"===c)r&&0!==e.dump.length?(e.noArrayIndent&&!i&&t>0?Ke(e,t-1,e.dump,o):Ke(e,t,e.dump,o),p&&(e.dump="&ref_"+f+e.dump)):(!function(e,t,n){var r,o,a,i="",u=e.tag;for(r=0,o=n.length;r",e.dump=s+" "+e.dump)}return!0}function Qe(e,t){var n,r,o=[],a=[];for(Ze(e,o,a),n=0,r=a.length;nS;S++)if((h||S in x)&&(b=E(y=x[S],S,w),e))if(t)A[S]=b;else if(b)switch(e){case 3:return!0;case 5:return y;case 6:return S;case 2:c(A,y)}else switch(e){case 4:return!1;case 7:c(A,y)}return f?-1:o||l?l:A}};e.exports={forEach:l(0),map:l(1),filter:l(2),some:l(3),every:l(4),find:l(5),findIndex:l(6),filterReject:l(7)}},function(e,t,n){"use strict";var r=n(66),o=n(153),a=n(127),i=n(81),u=n(63).f,s=n(232),c=n(38),l=n(51),f="Array Iterator",p=i.set,h=i.getterFor(f);e.exports=s(Array,"Array",(function(e,t){p(this,{type:f,target:r(e),index:0,kind:t})}),(function(){var e=h(this),t=e.target,n=e.kind,r=e.index++;return!t||r>=t.length?(e.target=void 0,{value:void 0,done:!0}):"keys"==n?{value:r,done:!1}:"values"==n?{value:t[r],done:!1}:{value:[r,t[r]],done:!1}}),"values");var d=a.Arguments=a.Array;if(o("keys"),o("values"),o("entries"),!c&&l&&"values"!==d.name)try{u(d,"name",{value:"values"})}catch(e){}},function(e,t){e.exports=function(e){return null!=e&&"object"==typeof e}},function(e,t,n){var r=n(159);e.exports=r},function(e,t,n){e.exports=n(628)},function(e,t,n){"use strict";var r=n(957),o=n(958);function a(){this.protocol=null,this.slashes=null,this.auth=null,this.host=null,this.port=null,this.hostname=null,this.hash=null,this.search=null,this.query=null,this.pathname=null,this.path=null,this.href=null}t.parse=b,t.resolve=function(e,t){return b(e,!1,!0).resolve(t)},t.resolveObject=function(e,t){return e?b(e,!1,!0).resolveObject(t):t},t.format=function(e){o.isString(e)&&(e=b(e));return e instanceof 
a?e.format():a.prototype.format.call(e)},t.Url=a;var i=/^([a-z0-9.+-]+:)/i,u=/:[0-9]*$/,s=/^(\/\/?(?!\/)[^\?\s]*)(\?[^\s]*)?$/,c=["{","}","|","\\","^","`"].concat(["<",">",'"',"`"," ","\r","\n","\t"]),l=["'"].concat(c),f=["%","/","?",";","#"].concat(l),p=["/","?","#"],h=/^[+a-z0-9A-Z_-]{0,63}$/,d=/^([+a-z0-9A-Z_-]{0,63})(.*)$/,m={javascript:!0,"javascript:":!0},v={javascript:!0,"javascript:":!0},g={http:!0,https:!0,ftp:!0,gopher:!0,file:!0,"http:":!0,"https:":!0,"ftp:":!0,"gopher:":!0,"file:":!0},y=n(959);function b(e,t,n){if(e&&o.isObject(e)&&e instanceof a)return e;var r=new a;return r.parse(e,t,n),r}a.prototype.parse=function(e,t,n){if(!o.isString(e))throw new TypeError("Parameter 'url' must be a string, not "+typeof e);var a=e.indexOf("?"),u=-1!==a&&a127?N+="x":N+=P[M];if(!N.match(h)){var D=T.slice(0,O),L=T.slice(O+1),B=P.match(d);B&&(D.push(B[1]),L.unshift(B[2])),L.length&&(b="/"+L.join(".")+b),this.hostname=D.join(".");break}}}this.hostname.length>255?this.hostname="":this.hostname=this.hostname.toLowerCase(),j||(this.hostname=r.toASCII(this.hostname));var F=this.port?":"+this.port:"",z=this.hostname||"";this.host=z+F,this.href+=this.host,j&&(this.hostname=this.hostname.substr(1,this.hostname.length-2),"/"!==b[0]&&(b="/"+b))}if(!m[E])for(O=0,I=l.length;O0)&&n.host.split("@"))&&(n.auth=j.shift(),n.host=n.hostname=j.shift());return n.search=e.search,n.query=e.query,o.isNull(n.pathname)&&o.isNull(n.search)||(n.path=(n.pathname?n.pathname:"")+(n.search?n.search:"")),n.href=n.format(),n}if(!_.length)return n.pathname=null,n.search?n.path="/"+n.search:n.path=null,n.href=n.format(),n;for(var k=_.slice(-1)[0],A=(n.host||e.host||_.length>1)&&("."===k||".."===k)||""===k,O=0,C=_.length;C>=0;C--)"."===(k=_[C])?_.splice(C,1):".."===k?(_.splice(C,1),O++):O&&(_.splice(C,1),O--);if(!x&&!E)for(;O--;O)_.unshift("..");!x||""===_[0]||_[0]&&"/"===_[0].charAt(0)||_.unshift(""),A&&"/"!==_.join("/").substr(-1)&&_.push("");var 
j,T=""===_[0]||_[0]&&"/"===_[0].charAt(0);S&&(n.hostname=n.host=T?"":_.length?_.shift():"",(j=!!(n.host&&n.host.indexOf("@")>0)&&n.host.split("@"))&&(n.auth=j.shift(),n.host=n.hostname=j.shift()));return(x=x||n.host&&_.length)&&!T&&_.unshift(""),_.length?n.pathname=_.join("/"):(n.pathname=null,n.path=null),o.isNull(n.pathname)&&o.isNull(n.search)||(n.path=(n.pathname?n.pathname:"")+(n.search?n.search:"")),n.auth=e.auth||n.auth,n.slashes=n.slashes||e.slashes,n.href=n.format(),n},a.prototype.parseHost=function(){var e=this.host,t=u.exec(e);t&&(":"!==(t=t[0])&&(this.port=t.substr(1)),e=e.substr(0,e.length-t.length)),e&&(this.hostname=e)}},function(e,t,n){"use strict";n.r(t),n.d(t,"SHOW_AUTH_POPUP",(function(){return h})),n.d(t,"AUTHORIZE",(function(){return d})),n.d(t,"LOGOUT",(function(){return m})),n.d(t,"PRE_AUTHORIZE_OAUTH2",(function(){return v})),n.d(t,"AUTHORIZE_OAUTH2",(function(){return g})),n.d(t,"VALIDATE",(function(){return y})),n.d(t,"CONFIGURE_AUTH",(function(){return b})),n.d(t,"RESTORE_AUTHORIZATION",(function(){return w})),n.d(t,"showDefinitions",(function(){return x})),n.d(t,"authorize",(function(){return E})),n.d(t,"authorizeWithPersistOption",(function(){return _})),n.d(t,"logout",(function(){return S})),n.d(t,"logoutWithPersistOption",(function(){return k})),n.d(t,"preAuthorizeImplicit",(function(){return A})),n.d(t,"authorizeOauth2",(function(){return O})),n.d(t,"authorizeOauth2WithPersistOption",(function(){return C})),n.d(t,"authorizePassword",(function(){return j})),n.d(t,"authorizeApplication",(function(){return T})),n.d(t,"authorizeAccessCodeWithFormParams",(function(){return I})),n.d(t,"authorizeAccessCodeWithBasicAuthentication",(function(){return P})),n.d(t,"authorizeRequest",(function(){return N})),n.d(t,"configureAuth",(function(){return M})),n.d(t,"restoreAuthorization",(function(){return R})),n.d(t,"persistAuthorizationIfNeeded",(function(){return D}));var 
r=n(19),o=n.n(r),a=n(30),i=n.n(a),u=n(24),s=n.n(u),c=n(92),l=n.n(c),f=n(27),p=n(7),h="show_popup",d="authorize",m="logout",v="pre_authorize_oauth2",g="authorize_oauth2",y="validate",b="configure_auth",w="restore_authorization";function x(e){return{type:h,payload:e}}function E(e){return{type:d,payload:e}}var _=function(e){return function(t){var n=t.authActions;n.authorize(e),n.persistAuthorizationIfNeeded()}};function S(e){return{type:m,payload:e}}var k=function(e){return function(t){var n=t.authActions;n.logout(e),n.persistAuthorizationIfNeeded()}},A=function(e){return function(t){var n=t.authActions,r=t.errActions,o=e.auth,a=e.token,u=e.isValid,s=o.schema,c=o.name,l=s.get("flow");delete f.a.swaggerUIRedirectOauth2,"accessCode"===l||u||r.newAuthErr({authId:c,source:"auth",level:"warning",message:"Authorization may be unsafe, passed state was changed in server Passed state wasn't returned from auth server"}),a.error?r.newAuthErr({authId:c,source:"auth",level:"error",message:i()(a)}):n.authorizeOauth2WithPersistOption({auth:o,token:a})}};function O(e){return{type:g,payload:e}}var C=function(e){return function(t){var n=t.authActions;n.authorizeOauth2(e),n.persistAuthorizationIfNeeded()}},j=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.name,a=e.username,i=e.password,u=e.passwordType,c=e.clientId,l=e.clientSecret,f={grant_type:"password",scope:e.scopes.join(" "),username:a,password:i},h={};switch(u){case"request-body":!function(e,t,n){t&&s()(e,{client_id:t});n&&s()(e,{client_secret:n})}(f,c,l);break;case"basic":h.Authorization="Basic "+Object(p.a)(c+":"+l);break;default:console.warn("Warning: invalid passwordType ".concat(u," was passed, not including client id and secret"))}return n.authorizeRequest({body:Object(p.b)(f),url:r.get("tokenUrl"),name:o,headers:h,query:{},auth:e})}};var T=function(e){return function(t){var n=t.authActions,r=e.schema,o=e.scopes,a=e.name,i=e.clientId,u=e.clientSecret,s={Authorization:"Basic 
"+Object(p.a)(i+":"+u)},c={grant_type:"client_credentials",scope:o.join(" ")};return n.authorizeRequest({body:Object(p.b)(c),name:a,url:r.get("tokenUrl"),auth:e,headers:s})}},I=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var r=e.authActions,o=t.schema,a=t.name,i=t.clientId,u=t.clientSecret,s=t.codeVerifier,c={grant_type:"authorization_code",code:t.code,client_id:i,client_secret:u,redirect_uri:n,code_verifier:s};return r.authorizeRequest({body:Object(p.b)(c),name:a,url:o.get("tokenUrl"),auth:t})}},P=function(e){var t=e.auth,n=e.redirectUrl;return function(e){var r=e.authActions,o=t.schema,a=t.name,i=t.clientId,u=t.clientSecret,s=t.codeVerifier,c={Authorization:"Basic "+Object(p.a)(i+":"+u)},l={grant_type:"authorization_code",code:t.code,client_id:i,redirect_uri:n,code_verifier:s};return r.authorizeRequest({body:Object(p.b)(l),name:a,url:o.get("tokenUrl"),auth:t,headers:c})}},N=function(e){return function(t){var n,r=t.fn,a=t.getConfigs,u=t.authActions,c=t.errActions,f=t.oas3Selectors,p=t.specSelectors,h=t.authSelectors,d=e.body,m=e.query,v=void 0===m?{}:m,g=e.headers,y=void 0===g?{}:g,b=e.name,w=e.url,x=e.auth,E=(h.getConfigs()||{}).additionalQueryStringParams;if(p.isOAS3()){var _=f.serverEffectiveValue(f.selectedServer());n=l()(w,_,!0)}else n=l()(w,p.url(),!0);"object"===o()(E)&&(n.query=s()({},n.query,E));var S=n.toString(),k=s()({Accept:"application/json, text/plain, */*","Content-Type":"application/x-www-form-urlencoded","X-Requested-With":"XMLHttpRequest"},y);r.fetch({url:S,method:"post",headers:k,query:v,body:d,requestInterceptor:a().requestInterceptor,responseInterceptor:a().responseInterceptor}).then((function(e){var t=JSON.parse(e.data),n=t&&(t.error||""),r=t&&(t.parseError||"");e.ok?n||r?c.newAuthErr({authId:b,level:"error",source:"auth",message:i()(t)}):u.authorizeOauth2WithPersistOption({auth:x,token:t}):c.newAuthErr({authId:b,level:"error",source:"auth",message:e.statusText})})).catch((function(e){var t=new 
Error(e).message;if(e.response&&e.response.data){var n=e.response.data;try{var r="string"==typeof n?JSON.parse(n):n;r.error&&(t+=", error: ".concat(r.error)),r.error_description&&(t+=", description: ".concat(r.error_description))}catch(e){}}c.newAuthErr({authId:b,level:"error",source:"auth",message:t})}))}};function M(e){return{type:b,payload:e}}function R(e){return{type:w,payload:e}}var D=function(){return function(e){var t=e.authSelectors;if((0,e.getConfigs)().persistAuthorization){var n=t.authorized();localStorage.setItem("authorized",i()(n.toJS()))}}}},function(e,t,n){var r=n(931);e.exports=function(e){for(var t=1;t0&&"/"!==t[0]}));function Se(e,t,n){var r;t=t||[];var o=xe.apply(void 0,s()(r=[e]).call(r,i()(t))).get("parameters",Object(I.List)());return E()(o).call(o,(function(e,t){var r=n&&"body"===t.get("in")?t.get("value_xml"):t.get("value");return e.set(Object(T.A)(t,{allowHashes:!1}),r)}),Object(I.fromJS)({}))}function ke(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(I.List.isList(e))return A()(e).call(e,(function(e){return I.Map.isMap(e)&&e.get("in")===t}))}function Ae(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:"";if(I.List.isList(e))return A()(e).call(e,(function(e){return I.Map.isMap(e)&&e.get("type")===t}))}function Oe(e,t){var n,r;t=t||[];var o=q(e).getIn(s()(n=["paths"]).call(n,i()(t)),Object(I.fromJS)({})),a=e.getIn(s()(r=["meta","paths"]).call(r,i()(t)),Object(I.fromJS)({})),u=Ce(e,t),c=o.get("parameters")||new I.List,l=a.get("consumes_value")?a.get("consumes_value"):Ae(c,"file")?"multipart/form-data":Ae(c,"formData")?"application/x-www-form-urlencoded":void 0;return Object(I.fromJS)({requestContentType:l,responseContentType:u})}function Ce(e,t){var n,r;t=t||[];var o=q(e).getIn(s()(n=["paths"]).call(n,i()(t)),null);if(null!==o){var a=e.getIn(s()(r=["meta","paths"]).call(r,i()(t),["produces_value"]),null),u=o.getIn(["produces",0],null);return a||u||"application/json"}}function je(e,t){var n;t=t||[];var 
r=q(e),a=r.getIn(s()(n=["paths"]).call(n,i()(t)),null);if(null!==a){var u=t,c=o()(u,1)[0],l=a.get("produces",null),f=r.getIn(["paths",c,"produces"],null),p=r.getIn(["produces"],null);return l||f||p}}function Te(e,t){var n;t=t||[];var r=q(e),a=r.getIn(s()(n=["paths"]).call(n,i()(t)),null);if(null!==a){var u=t,c=o()(u,1)[0],l=a.get("consumes",null),f=r.getIn(["paths",c,"consumes"],null),p=r.getIn(["consumes"],null);return l||f||p}}var Ie=function(e,t,n){var r=e.get("url").match(/^([a-z][a-z0-9+\-.]*):/),o=C()(r)?r[1]:null;return e.getIn(["scheme",t,n])||e.getIn(["scheme","_defaultScheme"])||o||""},Pe=function(e,t,n){var r;return d()(r=["http","https"]).call(r,Ie(e,t,n))>-1},Ne=function(e,t){var n;t=t||[];var r=e.getIn(s()(n=["meta","paths"]).call(n,i()(t),["parameters"]),Object(I.fromJS)([])),o=!0;return p()(r).call(r,(function(e){var t=e.get("errors");t&&t.count()&&(o=!1)})),o},Me=function(e,t){var n,r,o={requestBody:!1,requestContentType:{}},a=e.getIn(s()(n=["resolvedSubtrees","paths"]).call(n,i()(t),["requestBody"]),Object(I.fromJS)([]));return a.size<1||(a.getIn(["required"])&&(o.requestBody=a.getIn(["required"])),p()(r=a.getIn(["content"]).entrySeq()).call(r,(function(e){var t=e[0];if(e[1].getIn(["schema","required"])){var n=e[1].getIn(["schema","required"]).toJS();o.requestContentType[t]=n}}))),o},Re=function(e,t,n,r){var o;if((n||r)&&n===r)return!0;var a=e.getIn(s()(o=["resolvedSubtrees","paths"]).call(o,i()(t),["requestBody","content"]),Object(I.fromJS)([]));if(a.size<2||!n||!r)return!1;var u=a.getIn([n,"schema","properties"],Object(I.fromJS)([])),c=a.getIn([r,"schema","properties"],Object(I.fromJS)([]));return!!u.equals(c)};function De(e){return I.Map.isMap(e)?e:new I.Map}},function(e,t,n){"use strict";(function(t){var r=n(879),o=n(880),a=/^[A-Za-z][A-Za-z0-9+-.]*:\/\//,i=/^([a-z][a-z0-9.+-]*:)?(\/\/)?([\\/]+)?([\S\s]*)/i,u=/^[a-zA-Z]:/,s=new 
RegExp("^[\\x09\\x0A\\x0B\\x0C\\x0D\\x20\\xA0\\u1680\\u180E\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200A\\u202F\\u205F\\u3000\\u2028\\u2029\\uFEFF]+");function c(e){return(e||"").toString().replace(s,"")}var l=[["#","hash"],["?","query"],function(e,t){return h(t.protocol)?e.replace(/\\/g,"/"):e},["/","pathname"],["@","auth",1],[NaN,"host",void 0,1,1],[/:(\d+)$/,"port",void 0,1],[NaN,"hostname",void 0,1,1]],f={hash:1,query:1};function p(e){var n,r=("undefined"!=typeof window?window:void 0!==t?t:"undefined"!=typeof self?self:{}).location||{},o={},i=typeof(e=e||r);if("blob:"===e.protocol)o=new m(unescape(e.pathname),{});else if("string"===i)for(n in o=new m(e,{}),f)delete o[n];else if("object"===i){for(n in e)n in f||(o[n]=e[n]);void 0===o.slashes&&(o.slashes=a.test(e.href))}return o}function h(e){return"file:"===e||"ftp:"===e||"http:"===e||"https:"===e||"ws:"===e||"wss:"===e}function d(e,t){e=c(e),t=t||{};var n,r=i.exec(e),o=r[1]?r[1].toLowerCase():"",a=!!r[2],u=!!r[3],s=0;return a?u?(n=r[2]+r[3]+r[4],s=r[2].length+r[3].length):(n=r[2]+r[4],s=r[2].length):u?(n=r[3]+r[4],s=r[3].length):n=r[4],"file:"===o?s>=2&&(n=n.slice(2)):h(o)?n=r[4]:o?a&&(n=n.slice(2)):s>=2&&h(t.protocol)&&(n=r[4]),{protocol:o,slashes:a||h(o),slashesCount:s,rest:n}}function m(e,t,n){if(e=c(e),!(this instanceof m))return new m(e,t,n);var a,i,s,f,v,g,y=l.slice(),b=typeof t,w=this,x=0;for("object"!==b&&"string"!==b&&(n=t,t=null),n&&"function"!=typeof 
n&&(n=o.parse),a=!(i=d(e||"",t=p(t))).protocol&&!i.slashes,w.slashes=i.slashes||a&&t.slashes,w.protocol=i.protocol||t.protocol||"",e=i.rest,("file:"===i.protocol&&(2!==i.slashesCount||u.test(e))||!i.slashes&&(i.protocol||i.slashesCount<2||!h(w.protocol)))&&(y[3]=[/(.*)/,"pathname"]);x=4?[t[0],t[1],t[2],t[3],"".concat(t[0],".").concat(t[1]),"".concat(t[0],".").concat(t[2]),"".concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[0]),"".concat(t[1],".").concat(t[2]),"".concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[0]),"".concat(t[2],".").concat(t[1]),"".concat(t[2],".").concat(t[3]),"".concat(t[3],".").concat(t[0]),"".concat(t[3],".").concat(t[1]),"".concat(t[3],".").concat(t[2]),"".concat(t[0],".").concat(t[1],".").concat(t[2]),"".concat(t[0],".").concat(t[1],".").concat(t[3]),"".concat(t[0],".").concat(t[2],".").concat(t[1]),"".concat(t[0],".").concat(t[2],".").concat(t[3]),"".concat(t[0],".").concat(t[3],".").concat(t[1]),"".concat(t[0],".").concat(t[3],".").concat(t[2]),"".concat(t[1],".").concat(t[0],".").concat(t[2]),"".concat(t[1],".").concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[2],".").concat(t[0]),"".concat(t[1],".").concat(t[2],".").concat(t[3]),"".concat(t[1],".").concat(t[3],".").concat(t[0]),"".concat(t[1],".").concat(t[3],".").concat(t[2]),"".concat(t[2],".").concat(t[0],".").concat(t[1]),"".concat(t[2],".").concat(t[0],".").concat(t[3]),"".concat(t[2],".").concat(t[1],".").concat(t[0]),"".concat(t[2],".").concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[3],".").concat(t[0]),"".concat(t[2],".").concat(t[3],".").concat(t[1]),"".concat(t[3],".").concat(t[0],".").concat(t[1]),"".concat(t[3],".").concat(t[0],".").concat(t[2]),"".concat(t[3],".").concat(t[1],".").concat(t[0]),"".concat(t[3],".").concat(t[1],".").concat(t[2]),"".concat(t[3],".").concat(t[2],".").concat(t[0]),"".concat(t[3],".").concat(t[2],".").concat(t[1]),"".concat(t[0],".").concat(t[1],".").concat(t[2],".").concat(t[3]),"".concat(t[0],".").concat(t
[1],".").concat(t[3],".").concat(t[2]),"".concat(t[0],".").concat(t[2],".").concat(t[1],".").concat(t[3]),"".concat(t[0],".").concat(t[2],".").concat(t[3],".").concat(t[1]),"".concat(t[0],".").concat(t[3],".").concat(t[1],".").concat(t[2]),"".concat(t[0],".").concat(t[3],".").concat(t[2],".").concat(t[1]),"".concat(t[1],".").concat(t[0],".").concat(t[2],".").concat(t[3]),"".concat(t[1],".").concat(t[0],".").concat(t[3],".").concat(t[2]),"".concat(t[1],".").concat(t[2],".").concat(t[0],".").concat(t[3]),"".concat(t[1],".").concat(t[2],".").concat(t[3],".").concat(t[0]),"".concat(t[1],".").concat(t[3],".").concat(t[0],".").concat(t[2]),"".concat(t[1],".").concat(t[3],".").concat(t[2],".").concat(t[0]),"".concat(t[2],".").concat(t[0],".").concat(t[1],".").concat(t[3]),"".concat(t[2],".").concat(t[0],".").concat(t[3],".").concat(t[1]),"".concat(t[2],".").concat(t[1],".").concat(t[0],".").concat(t[3]),"".concat(t[2],".").concat(t[1],".").concat(t[3],".").concat(t[0]),"".concat(t[2],".").concat(t[3],".").concat(t[0],".").concat(t[1]),"".concat(t[2],".").concat(t[3],".").concat(t[1],".").concat(t[0]),"".concat(t[3],".").concat(t[0],".").concat(t[1],".").concat(t[2]),"".concat(t[3],".").concat(t[0],".").concat(t[2],".").concat(t[1]),"".concat(t[3],".").concat(t[1],".").concat(t[0],".").concat(t[2]),"".concat(t[3],".").concat(t[1],".").concat(t[2],".").concat(t[0]),"".concat(t[3],".").concat(t[2],".").concat(t[0],".").concat(t[1]),"".concat(t[3],".").concat(t[2],".").concat(t[1],".").concat(t[0])]:void 0),g[r]}function b(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},n=arguments.length>2?arguments[2]:void 0,r=e.filter((function(e){return"token"!==e})),o=y(r);return o.reduce((function(e,t){return p()({},e,n[t])}),t)}function w(e){return e.join(" ")}function x(e){var t=e.node,n=e.stylesheet,r=e.style,o=void 0===r?{}:r,a=e.useInlineStyles,i=e.key,u=t.properties,s=t.type,c=t.tagName,l=t.value;if("text"===s)return l;if(c){var f,h=function(e,t){var n=0;return 
function(r){return n+=1,r.map((function(r,o){return x({node:r,stylesheet:e,useInlineStyles:t,key:"code-segment-".concat(n,"-").concat(o)})}))}}(n,a);if(a){var m=Object.keys(n).reduce((function(e,t){return t.split(".").forEach((function(t){e.includes(t)||e.push(t)})),e}),[]),g=u.className&&u.className.includes("token")?["token"]:[],y=u.className&&g.concat(u.className.filter((function(e){return!m.includes(e)})));f=p()({},u,{className:w(y)||void 0,style:b(u.className,Object.assign({},u.style,o),n)})}else f=p()({},u,{className:w(u.className)});var E=h(t.children);return d.a.createElement(c,v()({key:i},f),E)}}var E=/\n/g;function _(e){var t=e.codeString,n=e.codeStyle,r=e.containerStyle,o=void 0===r?{float:"left",paddingRight:"10px"}:r,a=e.numberStyle,i=void 0===a?{}:a,u=e.startingLineNumber;return d.a.createElement("code",{style:Object.assign({},n,o)},function(e){var t=e.lines,n=e.startingLineNumber,r=e.style;return t.map((function(e,t){var o=t+n;return d.a.createElement("span",{key:"line-".concat(t),className:"react-syntax-highlighter-line-number",style:"function"==typeof r?r(o):r},"".concat(o,"\n"))}))}({lines:t.replace(/\n$/,"").split("\n"),style:i,startingLineNumber:u}))}function S(e,t){return{type:"element",tagName:"span",properties:{key:"line-number--".concat(e),className:["comment","linenumber","react-syntax-highlighter-line-number"],style:t},children:[{type:"text",value:e}]}}function k(e,t,n){var r,o={display:"inline-block",minWidth:(r=n,"".concat(r.toString().length,".25em")),paddingRight:"1em",textAlign:"right",userSelect:"none"},a="function"==typeof e?e(t):e;return p()({},o,a)}function A(e){var t=e.children,n=e.lineNumber,r=e.lineNumberStyle,o=e.largestLineNumber,a=e.showInlineLineNumbers,i=e.lineProps,u=void 0===i?{}:i,s=e.className,c=void 0===s?[]:s,l=e.showLineNumbers,f=e.wrapLongLines,h="function"==typeof u?u(n):u;if(h.className=c,n&&a){var d=k(r,n,o);t.unshift(S(n,d))}return 
f&l&&(h.style=p()({},h.style,{display:"flex"})),{type:"element",tagName:"span",properties:h,children:t}}function O(e){for(var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:[],n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[],r=0;r2&&void 0!==arguments[2]?arguments[2]:[];return A({children:e,lineNumber:t,lineNumberStyle:u,largestLineNumber:i,showInlineLineNumbers:o,lineProps:n,className:a,showLineNumbers:r,wrapLongLines:s})}function m(e,t){if(r&&t&&o){var n=k(u,t,i);e.unshift(S(t,n))}return e}function v(e,n){var r=arguments.length>2&&void 0!==arguments[2]?arguments[2]:[];return t||r.length>0?d(e,n,r):m(e,n)}for(var g=function(){var e=l[h],t=e.children[0].value;if(t.match(E)){var n=t.split("\n");n.forEach((function(t,o){var i=r&&f.length+a,u={type:"text",value:"".concat(t,"\n")};if(0===o){var s=v(l.slice(p+1,h).concat(A({children:[u],className:e.properties.className})),i);f.push(s)}else if(o===n.length-1){if(l[h+1]&&l[h+1].children&&l[h+1].children[0]){var c=A({children:[{type:"text",value:"".concat(t)}],className:e.properties.className});l.splice(h+1,0,c)}else{var d=v([u],i,e.properties.className);f.push(d)}}else{var m=v([u],i,e.properties.className);f.push(m)}})),p=h}h++};h 
.hljs-title":{color:"#88C0D0"},"hljs-keyword":{color:"#81A1C1"},"hljs-literal":{color:"#81A1C1"},"hljs-symbol":{color:"#81A1C1"},"hljs-number":{color:"#B48EAD"},"hljs-regexp":{color:"#EBCB8B"},"hljs-string":{color:"#A3BE8C"},"hljs-title":{color:"#8FBCBB"},"hljs-params":{color:"#D8DEE9"},"hljs-bullet":{color:"#81A1C1"},"hljs-code":{color:"#8FBCBB"},"hljs-emphasis":{fontStyle:"italic"},"hljs-formula":{color:"#8FBCBB"},"hljs-strong":{fontWeight:"bold"},"hljs-link:hover":{textDecoration:"underline"},"hljs-quote":{color:"#4C566A"},"hljs-comment":{color:"#4C566A"},"hljs-doctag":{color:"#8FBCBB"},"hljs-meta":{color:"#5E81AC"},"hljs-meta-keyword":{color:"#5E81AC"},"hljs-meta-string":{color:"#A3BE8C"},"hljs-attr":{color:"#8FBCBB"},"hljs-attribute":{color:"#D8DEE9"},"hljs-builtin-name":{color:"#81A1C1"},"hljs-name":{color:"#81A1C1"},"hljs-section":{color:"#88C0D0"},"hljs-tag":{color:"#81A1C1"},"hljs-variable":{color:"#D8DEE9"},"hljs-template-variable":{color:"#D8DEE9"},"hljs-template-tag":{color:"#5E81AC"},"abnf .hljs-attribute":{color:"#88C0D0"},"abnf .hljs-symbol":{color:"#EBCB8B"},"apache .hljs-attribute":{color:"#88C0D0"},"apache .hljs-section":{color:"#81A1C1"},"arduino .hljs-built_in":{color:"#88C0D0"},"aspectj .hljs-meta":{color:"#D08770"},"aspectj > .hljs-title":{color:"#88C0D0"},"bnf .hljs-attribute":{color:"#8FBCBB"},"clojure .hljs-name":{color:"#88C0D0"},"clojure .hljs-symbol":{color:"#EBCB8B"},"coq .hljs-built_in":{color:"#88C0D0"},"cpp .hljs-meta-string":{color:"#8FBCBB"},"css .hljs-built_in":{color:"#88C0D0"},"css .hljs-keyword":{color:"#D08770"},"diff .hljs-meta":{color:"#8FBCBB"},"ebnf .hljs-attribute":{color:"#8FBCBB"},"glsl .hljs-built_in":{color:"#88C0D0"},"groovy .hljs-meta:not(:first-child)":{color:"#D08770"},"haxe .hljs-meta":{color:"#D08770"},"java .hljs-meta":{color:"#D08770"},"ldif .hljs-attribute":{color:"#8FBCBB"},"lisp .hljs-name":{color:"#88C0D0"},"lua .hljs-built_in":{color:"#88C0D0"},"moonscript .hljs-built_in":{color:"#88C0D0"},"nginx 
.hljs-attribute":{color:"#88C0D0"},"nginx .hljs-section":{color:"#5E81AC"},"pf .hljs-built_in":{color:"#88C0D0"},"processing .hljs-built_in":{color:"#88C0D0"},"scss .hljs-keyword":{color:"#81A1C1"},"stylus .hljs-keyword":{color:"#81A1C1"},"swift .hljs-meta":{color:"#D08770"},"vim .hljs-built_in":{color:"#88C0D0",fontStyle:"italic"},"yaml .hljs-meta":{color:"#D08770"}},obsidian:{hljs:{display:"block",overflowX:"auto",padding:"0.5em",background:"#282b2e",color:"#e0e2e4"},"hljs-keyword":{color:"#93c763",fontWeight:"bold"},"hljs-selector-tag":{color:"#93c763",fontWeight:"bold"},"hljs-literal":{color:"#93c763",fontWeight:"bold"},"hljs-selector-id":{color:"#93c763"},"hljs-number":{color:"#ffcd22"},"hljs-attribute":{color:"#668bb0"},"hljs-code":{color:"white"},"hljs-class .hljs-title":{color:"white"},"hljs-section":{color:"white",fontWeight:"bold"},"hljs-regexp":{color:"#d39745"},"hljs-link":{color:"#d39745"},"hljs-meta":{color:"#557182"},"hljs-tag":{color:"#8cbbad"},"hljs-name":{color:"#8cbbad",fontWeight:"bold"},"hljs-bullet":{color:"#8cbbad"},"hljs-subst":{color:"#8cbbad"},"hljs-emphasis":{color:"#8cbbad"},"hljs-type":{color:"#8cbbad",fontWeight:"bold"},"hljs-built_in":{color:"#8cbbad"},"hljs-selector-attr":{color:"#8cbbad"},"hljs-selector-pseudo":{color:"#8cbbad"},"hljs-addition":{color:"#8cbbad"},"hljs-variable":{color:"#8cbbad"},"hljs-template-tag":{color:"#8cbbad"},"hljs-template-variable":{color:"#8cbbad"},"hljs-string":{color:"#ec7600"},"hljs-symbol":{color:"#ec7600"},"hljs-comment":{color:"#818e96"},"hljs-quote":{color:"#818e96"},"hljs-deletion":{color:"#818e96"},"hljs-selector-class":{color:"#A082BD"},"hljs-doctag":{fontWeight:"bold"},"hljs-title":{fontWeight:"bold"},"hljs-strong":{fontWeight:"bold"}},"tomorrow-night":{"hljs-comment":{color:"#969896"},"hljs-quote":{color:"#969896"},"hljs-variable":{color:"#cc6666"},"hljs-template-variable":{color:"#cc6666"},"hljs-tag":{color:"#cc6666"},"hljs-name":{color:"#cc6666"},"hljs-selector-id":{color:"#cc6666"},"hljs-sele
ctor-class":{color:"#cc6666"},"hljs-regexp":{color:"#cc6666"},"hljs-deletion":{color:"#cc6666"},"hljs-number":{color:"#de935f"},"hljs-built_in":{color:"#de935f"},"hljs-builtin-name":{color:"#de935f"},"hljs-literal":{color:"#de935f"},"hljs-type":{color:"#de935f"},"hljs-params":{color:"#de935f"},"hljs-meta":{color:"#de935f"},"hljs-link":{color:"#de935f"},"hljs-attribute":{color:"#f0c674"},"hljs-string":{color:"#b5bd68"},"hljs-symbol":{color:"#b5bd68"},"hljs-bullet":{color:"#b5bd68"},"hljs-addition":{color:"#b5bd68"},"hljs-title":{color:"#81a2be"},"hljs-section":{color:"#81a2be"},"hljs-keyword":{color:"#b294bb"},"hljs-selector-tag":{color:"#b294bb"},hljs:{display:"block",overflowX:"auto",background:"#1d1f21",color:"#c5c8c6",padding:"0.5em"},"hljs-emphasis":{fontStyle:"italic"},"hljs-strong":{fontWeight:"bold"}}},X=o()(Z),ee=function(e){return i()(X).call(X,e)?Z[e]:(console.warn("Request style '".concat(e,"' is not available, returning default instead")),Q)}},function(e,t,n){"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.File=t.Blob=t.FormData=void 0;const r="undefined"!=typeof globalThis?globalThis:"undefined"!=typeof self?self:window;t.FormData=r.FormData,t.Blob=r.Blob,t.File=r.File},function(e,t,n){var r=n(147),o=Function.prototype,a=o.apply,i=o.call;e.exports="object"==typeof Reflect&&Reflect.apply||(r?i.bind(a):function(){return i.apply(a,arguments)})},function(e,t,n){var r=n(51),o=n(37),a=n(175),i=n(98),u=n(66),s=n(177),c=n(47),l=n(334),f=Object.getOwnPropertyDescriptor;t.f=r?f:function(e,t){if(e=u(e),t=s(t),l)try{return f(e,t)}catch(e){}if(c(e,t))return i(!o(a.f,e,t),e[t])}},function(e,t){e.exports=function(e,t){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:t}}},function(e,t,n){var r=n(57);e.exports=r("navigator","userAgent")||""},function(e,t){},function(e,t,n){var r,o=n(33),a=n(181),i=n(228),u=n(152),s=n(340),c=n(223),l=n(182),f=l("IE_PROTO"),p=function(){},h=function(e){return"